[Binary artifact: tar archive of Zuul CI output containing var/home/core/zuul-output/logs/kubelet.log.gz (gzip-compressed kubelet log). The compressed payload is not human-readable and is omitted.]
R5HhDLC*;`,ٜ_z瀯z䀯zဟL̾$d,"3̵6Gd"sxB62.aNoYS>zD9jP1'1+`v|tesvMx bWnyr׶ԛh @5}V?1)|x7x9ٺ{kllLrZLR%S8jf޺:,ٺUٻZzԸZD˞ZCu[>A}4=)]}J09ꞎS͉m,kooE_6="q{C ќ b: !ϐG{!ڈ5m0t ҝX7;L^CNdK؞mu{x1KkMd;R.:.UZaAY&KZ nZ*!N@;'!MNoDfɃ1'ʁՎFf Szr\/U.۞|}٧l 6Eڮ-',V,K>鿭DkޝkCKF36Q5pm/4xQsU*sY=#CZ]( v夒3aFݼ ݔtoKg!G"0'~<9][oArZV>\f3?&U0Žss2H*I6? 8"Nr/n k41CM H@0lq C%h }ƪFfCҤ"sNZ\p $wA  ȹ-Fn{WW(Kz>'M>|G^ vPͥvVST*ys|,_8hdLy !D-f #m35FsHUN#Cg%Ґ젬0IZ|4hLr " ҨApT `p?1r<{G^$$0"B`r8k1@myjRu>$VX+dG š(C,ew \T? ^tЗ}0hԒb§ \@p:gT CfQ3ȜW04>ʥ.hujbCRUfubsv|x#j7'L̜ÎI7eG Պ\{o`υIS.{GˇQx?X|ًp/Y|杺:Re?s4*sxȷQOdAA^gr|E@cFX֑e`~1Uk]^_ B`d*e\3LHm*F$Ĕ'W ӳdFV[W/;RȎM/2O%Y\F\#Us}~YG2N{7htϧׁst]!Sy5>[S~Z$@/7{?N%e!HNW-_ )5-St(3iʹ-&oLJ7$NERt&FaP5@5dQƃp >W̧_OC;ߕq3Dyԗmyu~=GFl|ZS:afeDYhm V4i( DNdl me4Ece.sHc+#v5r6#6Nq9ԮۢfDQ]H<1c+%btQFN* e^a<0YG69幔Ui|,d AD251y6hRK~<Ƭ" dT'kW*FfrPEG-Dd`"b&# &Prd-4.pq(xX;CY̋ېکKWOgAgw.]^q;UKwqW/Dja=ٻ:?+qt -MC;o&Ɯ~W**!Dx#p4iރUWꎼR+uە76ԯ .V:+8ʔ)sp6'cPK9\EP-VIV˞x@x#tBE0J/Y j/vȵZܧN/Ǔ>ubI_NB JY_s3 p'ޤ5%f4"ƈ1~eDߕ|gEi5\8I>]r^kڔ^ti*?H˞]<`{LIy[fa:x}w؎'w0*.ߞ\i&2ic"YBp\h5N?WBECʮC"[$z]q\ݷ;u|9-,RJjy`=t?[޶5 ht9%Rlv<|IlK7nՄqr1=]z2.s/a&R8}:=|=sNIײ{$kJ/GϖSlPwdSZ[!LbAP"&͝e˘IZkAӪFE*כ*z Z 4s$!IaRla.M}6A n8V>wz5[}̵Q:e!g:o1] RR[ᒲQ1%m]o^ Q P:X)YBkdVjţU>EOs>0\ S;/mԅ@oimѼ!BwLeGhc%R1ƴ+X&".GaM`u}Qy+c(U %FtL\9Ġ8zbmj3 2Jp$Er,OJp-v -5Y+Lrbp"&I!e_Ppq}dkӡ$7IrS2OL,sx>hXTn-W-c4s2Ά%c 1VRMYKHSI$-,.$Kٻ6$W ~6%}h`g`{fôEI*EG*T6,dVVdfdGF0iS*1=vF{,^{̷{6b B'r Ȫl8[XɰfX=QX+nq;qӆU0r43tny$<'+B=[0=Y62!vř Z'B!gh՞{:FYYAj$Խm\?ܮCp⹔6da:|cdSh/p&P_e0ɅgWMK_`ef@_b6q~9U2HQDtl>NG¡xUrn 3}/}9K0c*0^4>;w"G9`4måG"sέt<K _+%O~ɓiy$LQ:=0uFb#*.9Zr֔+ ֞kGҴ|hOL^y)Tl2io}Huu 7ﺽ "6I kj>2 m:z&W@q1aw rhCbu^=L`yWvlBZҀ4v>yxwUpek-x8 bMxQȥYuݑڨ$9iSmNLGI[H7 Kq47̃}AΕeNF]4Six`,(3m}er+gI1TG_A:QM6}\+!8%9e$F )SlI`jFJ!,η[[csb7kGjS(Y@SJxPlE@"5tNJ*j#e za'VdI?%/9:vz0lWsal>;tt"t$@㣣4$7}0Y?9-`͙A6ƒEi#Ew1=$bO ؓA NS&HOe\(Zzf[.VQ1"ZTa$#أvkIk8M(rn(֑aY PA[J))$]K-rvCU)tN,u8sָcM`?>1jߣ= ZvD'a ^-ѰQ%cKG>u]Jָ|4~ve/~^NL>1",tj'&"dh4h#@ʘtkS}&ZKۍW˾KA~eV ^+kA14X`"k"QKiJĨ6xΨjgoK g39kޞPSaNhđ2ߧ=2K46o[ŬYRR"5Am;Qs>Rl-YѰAb?d$ (0kV9!xޣty РY5Yn4~) =w]ָ( ȸ=~)͍)L?\{1z {3?5h'a7Yc,[Ϧn{)5 BԬ^Θ\7L,# Խd~D#L)`2IoW)sW߀SN[ؖkrTa=7o0׊~Mr" qhRQMS#SEbβV.'2(B\ XK&ۙ-_GtSii1]{5ðb!f,of~$5S8pe&+F/S`f8z?}g]*{1cUSc,j ܖ7~>,rT GosjN2 lʣI5A2@DG4ũ\ H}ޞ\ >$hmREfB [+#+"^p)i& JWEfbgmn< OGx}5xbIB~T$Nt>cl#b$˺CXq Х4EG-K |ZlkFB9Djm"` %0gyO]|u G[en+s{ۍuK\;hX:„K*bJ K)%K'*Jå3zt93sY04\`D$+h]!ɝ8RcÃ4xcvh#@{ǀ}{#AU9,Rg2C( HKI S"!&",,Z0l\ۙSZ#gdxx^q'|ސw9OcmmyP(Bsἒx9O}vB~ށPQpI( W`Ń!yb*:ϭ7Q4v[)$d5"2z=כ|SBZBK֘53ǜC$gu )$_nTIQ,˝(g̹2Br]1SpsjV̫YfEΨ9jV#w Zes Z7!pudJD+l J-G֥l%5&H|IBZć%S~j5o]16QV%|>:y0^T]/+ѥxJ/QU{?IX5 W W8A'@,3b v-AL:O>\p464gӀߦM _~3 m+!<*>;!5dUq+$q@7⢏6]iY(sH 2)F2 75 uSCQN !#fɛ Tx6ە#hD!02!22yps{뢚 5ǘpzpHm;P1:E-C=^}́Z8FK]|[9Z85PQ≮ȯd͚ζZb,:oZwhIjiEKOb1")Guӣ)]97&!Ή&/)Ή/VTv5@0]T:we"KqZ&jb}2QH|NK%ہ;kA$g\Kx1ʓbT \NfD-$*TGRy+_7 ]R1)Ƚ*gsBsW:^o][6xZ{5+0*V*Ce{8>b cݟU }$F 0K+,^Kl@V 0msEIdʢw9Ib$j[5A$hѵڵz ŇjS/{[Xv,Gx;0!㴇j#dla68gptyt!*jV /`ňD.V"5D%Kӊ-ՠ3X,{61o_E(AtTtw.= *~MxTwKiA7v4Uf\ݧ♲c˗A},t1lZGeӧzw,nsm!}0j*% ra$rD ^P;Pl@hIuK^㓛#ӊ4j?*;/q%WW.=f srA bU"_JRy*QYt+z!(BWOk(?ݟ^*ܔw{y1K:}ƦOqT 8/6oB C _ʨΌKy׿4x3,J/Vӕo*P7WͺTisՆ[\[QwV&3g+5yx%0֜ nl}͍lyFsٻ8+W6} 6Āg̘Qȡ-%Ϲ3C$E#u$}ֹtjĢ(o_Vt<|@xX|BY)̫@0W펲71fKٸ8bG!ږnP V ?B:T[1yY?^n!,hw}y''?*ϗ_\r0#T#z#UR4GYP[}2S?Bɔ\ݪ itZBm3xʥ4S5MA\zSh 5G}P cnNB|6yhzhsv3KQ+]3Z= 5QualiOڠ-v4GKbdhS];sʥ1hx"!ko H se{]!11l1ZsUWUiԒ (k.9s6d mS1J4Y)k{7 ١L6(4(Xj@cVyFM6_ߨGJ'JY ^WTrgy~keU6[tԠ ` %S>R\[h'DQ!dgUդz.k9QJj MQr#钍5u}otsaMnN';Yn]=cGYG7(}m fQU6AZzC PRAjC[>"0k}HP ImgjSr}*tE9yXjjb*E*6uAڳZaPB B!Ҋ ӝ6*e`4TA 6z3XGE)1أty9lxiۧGҬiD]v7\]E b:cHm̭l L3.tlYFX7rf4giFC.+* p=*O(Jl@5lmu9MEX6P[ e AnT +Zwf 
A&뭏@C=gDPBfEB4ÍFU]PcR @A'ŜY' sGAA. UJ v]t-PJATz*A[*MC۷!@:kB;W %-A fY{("KQ @dZ@!O] %cV]RL$tykh 12?{T0Wz,IYt u@V4:I=ʚ9͠ք( ѱn'2l5>TVcGbI tf iP6}׽XW̆K(NcDU:Ik!`BK>}o ̛ܹ_`ם쒟(y,t~^҂׬`ޚOގ]@AQیZI|tx =K8>o9@G6*M(]͡-6V 4r3i<1ė5XH肼AV24ETLԴPyM,| b1w$ W4#Ѽ8{:K@B}dP֨Vx ;P #ut[Pu4+`FuMOU;3@vV.!Ir!ӧ5 .}t{ao̻:Yʞb:\yS0+xme,#{ إ\nz1΁6DRTS|Q*q`\FQ vo aJr[ E@Y) i:B 9@bZ۝ 1i}F2z v i5<4^!D xd+)`Qm RgViiU_(- ʰ݈&@M[0qVcCeXkYqj(YW=Ѫ  =i"Fg4@[i(u=zkJ ;&LoVA I# |4~^kL!z\ϡLK½<_힬t8]@U>[no^Ws+Qt@=4̂gekO9˿ U-eVn6fQF&p4vf cYFoG'fMj:5h7LJxKT尾T9P/ϐ 3p[{hNJTt `$.LMU DGGPz,=q@>.ĊbPDvRA'w äoGɟ"o,oV2 -].|E#!HJw:HnPUBU^ZsO!cSULmT@csZ݀[ Vܢ4S;k֪VVm0|Ϥ΀Lڡ@Z %`5נhd?Co=µGӈX*>X{(v:)Y'Xk*9 XvAZ #C+Bj{SQ AOj|d$NҳjO^ F`8%/C)FuE>\( %lnT *m/!b!c-ձ(ZI~fŇH 0iIJst\!JkB*?#u+yZ D()Ġe/_Zܲ^ovKwk6D2K̕2AC Zrӧ7p|l ,jiz׎W+t'T%hy;/_v˗6}{ˋ~\1N_/68'O! I6yoΆ3.H7wu8Y*|iBvcNo?Sqf]*FU ebÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J W*!0nLc~wvN W*p%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbկjlJcBph}+DIN W굡'i;^z^q@! v/P6m~6Yܽċ7b _|tuεd;dWؓ؜ t%_(3mqڗ[6?c >}N=~v=ş[[iDZ '=C q5^N鷟/};珁:)('mh7\|zr77}]1~9^jsʹDu]M{2gw31Țb6Ml*fS1Țb6Ml*fS1Țb6Ml*fS1Țb6Ml*fS1Țb6Ml*fS1Țb6Ml*f1q2rlp(fS* BP@h|m܉' PvG%w00pև}{r^ L0]~vЂeb]di 7;%cO{WnU4mSF5-7 ݫv!tꋗʸ~qk/Kf/)I<ӯ~>|8=E||,ZrfU[&nYc4ΠcQ> fq`4z>᱓?.Gz:V0M->0\S|GN[|z(}\pEW$pСgrWp+kU:h{pQZ{aAWH"WWݩB2L@ = א\ *$R˭yf?}\i8 .oPðOR21WpkdalHEcSsױ)#6uqB y\&|7bٺX,{{[HE )pH4ԩ_N. k 53bpִ \ 8 ²Zpr]&Ȱ[~?FIqIJSIJrerU)pS-Ib6pêb.{ELiLNf^ʥy猷*sSq/s}tuϪ1KQ]i~b2?r S2neSƯ.a0AA[ϗ^NMkY>滜c5.Zb(ZFf<:0Hn} 8J@ 2wΙ;%*%C!QZ76 9mp ƃ) 帻X|UBŠr\@F?=2Χ|.;js<܏,=5O,&JI8Ӵ=; Ok阚׹Je/=6=f \G9Kz?.;s|~m1OgrF0{==7q)1ID}:+9?AE3ƅ>-$ oٵQ_~k'Ce7,[v؛޿c98~!;|4h|ht12Iۄh4r2@zuOnrC`N ^Mg]š)k ZAݑq(Dg#1 q37kJf=Ge,q22h9ޖ[E9|X_ewrl8ͼǨ^F=cW z@#KΛ| >~LۇmK\}]c!>vcȲ f?2G FmH.tMߴˎ28x3>nQ^)bmsVAuGK: `x+67zR&;GoF3'dna+yPID&?w׌["&hxP`С77([ O?/ǃPy{>.oߩΊ#RĝфsXq2j0:9Id^ZK$ᜒSmntpFFZ~c%Y<"^>hEp2<.G&HBb.">3&H e0Bdv[ڡ5LmBX/Xta(uQow) WG,kAӍ~۫>_F[M}_a7W`c/v0[=ۏxa1(VWIMuJl7 /Uh c><_`}`a-ےj`m޶ mo:Rq2R?-'>VL3ۅ&RPPʰ %JM5 P4*$]@CuHIW1lp$LPQ`T Km"ng4YӆRC udܮGN<=nx8AU;}90DK $erҥM4 D⍚L 뙱1$SI7ē\xh})*16i#e5tdmZ^*PMselqVF[Gg"6 ZꝾx7'?-E{xT|<5Ph" .(\QPIB:y")bNOfEb/EVu9_`Ա %gs8BhM,(L'FRiEЁHOӶe`,Ҷ` S+^ *`NR>j >F;ǸRQ d%ifupfApep[͚ٹtzsi-s0v v&yLFd !U>\05`ؘxD#-8#0JdVDCa}d4PS!g/傼\WxS /,cVA3A* ߢT'W1>~k"E,"R  HGZw` J@Vx Pʴ:ORs#)$4m4(d3ӄCpO|Wk qecjQoN1o)õVDjD.2?`(1HႋΈ?pQ$[.d,AuFF `#VG#$%NтR /EJ؆%1r^8;xi8@ 3K Z)IeD ɣvTO3g<݆NlU=48LEsJZBGɚ@BJ>E8L O3'u뇙oDvgfqR{gmK#SL9)tӡVJ#1Z;젣L7&la^_Ka][dv_p`%EM[v280D6PYAϧ t?w 2B0PȔLED#IR8Ά8oԩ&3צKijXqWg;77T)F:ȏcaȌ'& D;(6ga s9ݓ.`gs&xbhPhTrU^fe&1a?f̱9i9ECuǍa9,LD×zyyjƳO@CH8тL{|C $xňh'6-;4߹fCν(ށiw{FBTpQ"v1q-3*&N\2I#uJ9ЧU#Sv 6gx 'RNzs)U~~hGPub8MD<[0G_hbG%Hu9^'(^QuI^uNpz[ a2KJ_t5҇%0otɠ}Ofs|X.ߑ7Q~24; ;L4_I@B)|^;ծp \q (ܷ]n7Ninw mv~GѲF?;8: @le 8K"RL:0/m<$%hn6ϓd;b\9#H2H 1jMx\NG4-l  9/ck;Ay3%(ǡl>+No+W]^D#\-y̯z0 RpJr]ko+ }7-Q$6 {$eqi)g;b45z X.U@0yvZ;Ŕ%`&xEZ,׮ID&nӲ2TH.%. H6wKI(k.h,UJgl9+),?B&\wPTKBcrf+y VKd)D6KJnU upe9ՠ Gaf| ie+RtYEn р%v(F=0 1|6dт+.HM!"r*ɤ% $$ej$2}Weڋ Ul i:|oi8AT__=ը<@l6o ȥuk^y[g7& c<|+p3uZCغ, UDF$t\ADYJZXꐛVjoc(jq#ЕO.wz?|x.1ǻ Φ8q~'UvF%]YddI|F@Y3N;G'-;f΍BÊB O4|yt ã{YѻlpFmffA"?`nuRi/5onת-d~rXtn"23-oP.'Ԩ{={㊴0܈z1_.O9 vz+>AsG]dQ"+c&:h8Ul8Re=V*{4(1L>3%*juDH&FkBQYM/fgW-!6)ʂiܦ=pcp[Ȍ$2]Jg Z}glbs"z\u4|hݯXWhfPKVlоs7Di# 5ap6.4eavCQxL6Z٤# rhm\"]}߶9;7⒮GTۧ;F]Im\ۇȶ(yP.oqwsoĶ[e ˼=YZYBQEnT/v: b K0vvY>-'av>];?_Mg_8b Ŕ'Y!" 
cVtg2,6JmY.)bKEΘ6@qR p\pGN0HYَ3q6#iΥ>uv%;8b.N\$^EK-G 0sqKQ d(YK0-vz9.;ӎ]:3:}_6uݏϴQDgSD 6w+)V&5B4מi㨆{{~Dwwj}Q}9H"#Bp95pꝷփP0/T]z 9xQR8&3B謏@}e8m`)5Jz}:v㚳 ζTp:k_ _z&jr,V@C'tbAd$}Dlh;ѽxLPƕ5랪)CFqԊ[J5rC@g8˂S21((-H_b;99wT8灁hM;/q@t1jQ!VynbΣz{WlAR8"D֋O,0peEZ'wEqcxp3# gGg>ڝSI #u%F9VSk1_{E2:АbJJOE)<%MCz9 &Br4pI8r 83W'Y3BJ傴.Em3d  ܓT! F&ztWlDQOh%ӕUZ[zJyEd5kJ}zp>&cĂLԇJq@U4\Z.H)`#B+4Uذ".Ik8KCW/eIG l>*9*U|/f,fGߏGE GR֣'#Kr폛imfl4NT>mߣ_Fg_7G'U;EF'=a4Gi~ߖrUol~4]N-YIE^=ʎ4IU MFʋ$DujpF,.5ZtgGt*Cy鿎?՛qYaYQ%Ks8M &XOV_ 챖 @WqT _W߷ȱdg?/ɨ%ckkL+KZ;[VVF>z>+RR5`ZW¡P"1}j$\ j*uԖ-HU*]:'շKE[ó| %O&Yh<Ιj.j+O(~ Ro\o 6g%MEYԍq3娌ש]iڿr1xL>,Dgh<7 )!JiM.mH_%TTV^3Twg6.W!Rz;ND!|Lwn7lQ;\}U>S\pyZ|,)-\π+;ծ]/\ 6vWE\WEJ\D|DlQU|_̕sih)+osjd|^_Q h4q9 :8nkpAd} LH!QYIsgJkНѝفM ![BN y 7 yS$j~LG0ӱ̡cq:VڕVnE5 sIhVXцX 2x@Ԟbf!,zCX"0SKɷDADcgܪV!'43Ai_$€ʐcr%э t%P12}IlQX:gsgL dLJ>aǥj.ħn*9bRJw9;myON%߭}b%FdIX"x6Z*E|0$Cpts,فZs,W1gukkwIdUpl&| 0'ǍB-U넋قY_OY^Wˎ6dyاhѬ+տOOiuNw?=ʳj?t-\mہD{f0ΧMf|xt-{V'yJi2hݞZuX7Isqʻ͖bf!*{.& .x%[tNrr뭴o<$ N>R YS@y'!(&0(S}CdQU8Ykf#^xΟ1>1S rvE(FHS ےDѪ䞄TF:rh~?AY /"jL).tQ`dG=}㝔ِ3J5!򹜓# 9^Ҿ9^w9^'ؙrNK'm2]@[q*[ YI%2'i ?ymN'KBxYly~`};j2 {S}וH:No +ٮ9mG1kؼy1𣖫֘bt>HՑRQ7ݖ9:p@ͮS=T+ni5dF]ʚIV&˞_2ZtTLs`Z &Q<[ 2N>dئ^HDD 3 Ft,uQV9Rtz;*{ѹ3I}!,Kfz::\KM|fc,: JlX B ly 5K"-dV&-#LL6H)BO Ut>a .&Ijo.l=߂J MAIb^(jagpelvo?8~Ā-K!-|rCvㇾcd,h{,,XgMC%B R|rKIr崎s5+tpmݷߍ%#o_j㭏.#d-^W~s.z`Uog6 z+ԍho}걯jw,8ɐfVoe +ZWgC^a YAd/A%+t*CC(GBn0ߩɮwU\~E.6 5>&g) ZeX;ؔq0[; O'Fڷw,>J"Y^d)+wV|t ΡjzӶ5BQ3q`XOS:OLCA[C0Qhp6aEE5tԇ2}2}'e ۹͞|S5_ D݆e1N$'(P% j*N}!:_) &ܸq\b58, $CSGeW KQ CajͦsD6m3 IƩXhc!XxV"2}t@Ͼ|<۽0~f:fRPЩHMVW4BS1V+m2Х"#/C5BB(yZ ֮l϶m'/yޔDE$#vFx;[&f_Ptڮ1j"I:ZGݙY2%a^V\  IGrmָ`c__2dH.Ւ55sY$6ɆM0.u ᱭTD8 >7FHKY|PX+} Z٥Գ"^ށ5 iTWBY*C2d*r)FJ<=T#bF/7ď:2.֑K}u6qQ7nIŀ.,#B:{Vn +I\dmQaFZR95\fkq+\ 6S: a ûk*q_Jя/BJx͘]-M VJy^lP(9̶& bxxG;bJQ4}IQdSsS2"` Y C>ي+Nhɗ }W_jEI|GR|P3} k)(ۚ@a>'Ӹtٶ!9>?U'Le)J4o I:-+g ̲6+՚V4m]v-֧Nyib KsErY,jt9(]$:zֲ?Rl睵jZfFq&%,"ΒP6tN0_Crۈw`HcI@b?%UN36 ﭦsU{H}lE=~-`h5pguOUTዶ_>+KvF vJ숁^*>.y]D4D^-: "v1DىGj %—DҔ_WhxK9 Jo3Rh j I!75 Akh8͗J鐔E:V(,I KEhQVҺqDQOW;oiw3p͛KJ,Mp>D}uޞ0 \u [u^$35It `"a+u=ko?J:EL5_ZwHȐ]Q&ABqQiI:# tmμ9߽L9ov<#{|ջ9 FM*!()GֱR`H2- |Df釻B~ }QujUZ'³x|`Y#("a59lJbre; -{GSme=F)r% >AH|6wJ[V+0'QJNVJbA_E} DAt6&6J2i¤AA .R>TDA~PtJHEZ4H?)MHIuaaL4h ՊaLѹ"JarؼhUɿ"}խ`f՝5̪p1qfIuv}sUe_ {u:]6ty }GRwk\\a/>EMnLmU |9e}OݚS Pon6.EWox[\\oGRfx?{GGzi~qem ſ1Th6 /寍6OWX WoͮvfO8pGa|1OӋ>.PM(7?6<ڒՈP?>{($PB^N5T }㯗9os/iձS0v#ʇ&a==]7i_y!Gו(Zq&Py (,UiXl3J|<~Y+_ue[%`C8ɥ |U:7ѝÖW[?)lwGg,Q%dV9QRL\ovEӨ%gMyp׿ 5bq_.gտyN6|Pq/q{(([VsA;6s@4`%E%oNa`"$ `%䁽f7B}~ƺ;AѐhJ? L9>E =mnm~ڛ!d\7-rs} 6nWu}7}w +/{X֋+ձ佽B˾\ypNg0MkBD;d"=!-F S597Ng {{ ea֦Ҁ|I:C5_Z' z+Ә2x_F&t@\!p]pqM-Mʵھ{›mx5?1^eQ2}/1s%p)@څhsuKÁD3;@'Dډiv<4zXВ2L{F26Є -U)( IC21)#knu {V&x%9ru>p ھ"`:օُ #(i6p3EaB$ xVF; CYل3(ThV|)A~պ3"i=aזRLjUs55Wt=s~D* ј+ј,-n3ͭz;q%X.IywϘ@AUG OVM9:.p[>| Bxa.GTV;CDג lb2aur5<5<#3p╯r<_~o?u6d1!S|좯'̦'h9!xA2N=ޕLQ,vĘ]8E 1{&B=6HUc]sן6Bmm볧}=1UY<|xFgP.*~ӻSVͥzE'I1(qK5WMNW\6/(a %XJg+IāivE&ƒ%|2FΤQ5 Hf AB$4“ Mߌ&a'=5V@|b{_݆x&`h$Zn1Yv-yRRcbP jƗB\R j]LĢm$"*NY봵e/"yZ 2FTz$1TfE.Ts"]ШӶR{gu)>g|, ͚-< ;Mړ:sq8B5Ǟ/zk`H8Ãv"!"fKI|$Vmr͖>D*e۸nfd]rTmGZf]!Ё$%(iA1L윸 l[~I[gY+_Z2;9=-nW=>zѯ)lrU1(4uR 2Q%JD/9I3J߹Gub'*o8ƕ9ē୦`!in GD8"unX)*nTgi}U{FSM.J@FJ+"ت^?ok]MM $"IZ\Ty!' 4& ta<1qUG+(H UNecK@R J#u89%,&4>fT"ħL?70!lcSطhoB>$0Gb;^]ts!|UOO &Q^ФǓ.>>otU'XjTI#IsP\ǯղ?,)ys܀XisZS5UsJ~_*̧O'UO' bOAfB]QѻVOcֻKl}_ϱG4yas pLѳ]ݏdU`EbOn/yG6,n^5'鱒0?NFw/ rNBI.Opo\pir|n?D+h;7%!HtByt\,AWl+yJQ1,?M; 8jjb辭ꊪOUgxf]_᪱ ̧q~JT=6 TJ!I-_@vtݾ3 >,**×[%QQz[P+-~ũlY{#ˌK8ϟ[qVX#U- VU cY6?#&g[EoqNRSD"Y%cDsDLbp깷BQ -z۹4A7e5|vl?lmJ'pF)cEɨYT/!{i]OltSmn$<+wiu=tb"+?+On8Ug6*ifZt~6)l5iz&ζ|1ԬZ뺡9AZ2 ;8{Px[$H7G9akfL(:അ#|ZP)h! 
4ϑusӘ$x_F&2t@\!p]pqM-MJ^ؾ{›m0?1^eQR^}/Ӄsݷ&eL?y/ =L6. ~GVx ekku"HQI˕q&4ktokь=D 8B6SOIe%/0VDfDfM0 D R"(p1J]8-$9OYoHN;kd4Jc,o5 %rI{ST)ƄJ؆Gpc<`zQZ3=w].ا"am l?0lܳ\,kUJY~H~~$?8?bz2~(r` ~Z.IJC3pV"**MB"Q0!oކ\MJa3=+(=U&0,}z=/hV;?\Pj$ fgY^97AY9{MZXˏy00`D QM5 ~4{+kQ6fhsnٯҴGY&= 3Of] ۺ`׵-ة62vb] mҬkRÞ-׵hN wVGjiwzoK?y. q,k)y˝y]WkVr IDSWwMgӻ4\}eDS5~:9zJn軱e~:4]QPd={ y,Dt^yᚰ]+ =jKeVUKHRUTwb[57] ǒ)JgEucM3% dꐮrvs\U.kʟ~e& 騭m J9I $" dȶфe,0#%gRBY"g9 \NO_K펿 vf|87Wv$g45 X<W.zS]7D|b 5 ci퀁&A&x -P, t22(ѐRu0` w)q!XШ):RBd΀54K`YTJ5qvOAƙ2L(HjxƲ'f `1XK.m٤h(pg.#,5Pcvs(LL?1"CZsي]ȍ#!NaIjY|a1|ssF \*#6ET)$ȃo9ҟeY4sl@slBٯ*{sErؼǺmg|"w`9lJ*+ xrP`~[+"7ܨl& ި7ibLX*nVcvuI+S"*J9˓6hNAeI,A Z4RmGnոg5+.v-q|cv'܈~hYPhŸ-_ڗfhm6nmuu|$tfbQG:)ڧߴkt :O &IwjM2ބ1NՄqyv5b>H<$N#3[^(x "M4*)xI%تE6FԂ|pop%K>bƙ$|f>KKL1LJ@&ƃq+UJk&%,GnS^qcp[T"3dN0w;+^}5qvyH٨Ma2n2Wr%v]RrDݺC[21v%Ԉ[W6˙~t mӟMvw(ZW"H\bmH $ݜLttwWP[̵y/7־]mvSkYz-ܹYنWg4~'v ptdvOWaZ [ת孻nծOkyqy1#wBqr]OJ,], &{v2at!CLt#I^cbd&;%%a^qVVt #:/S@/5 MY2?M5=t4{ͿTv 4(,h`9ÜEPQZE#"8!lI%%=[KH=4 2J]br"JeSe1 CZM594wBjzoc9݆fw@U=&+˰Y@+@(q(2dkK=z$8hbڜM1{YH, Nl.EY9Jw82Tmd&ѴոJ5,bjƒWg^^f&cbwqG"\/w~wCg0ठY2OQ c2:g2,6 ,ATElIW sQ0 66]6LC"!!e VFjlGl?s(]M;EmfDn)\&A<WQ%!^%$7L$'d9UvPus\1\L̐+L&&HCIYSJ'|Y :*a5qak/3` "vD7i2k-tD QڌI/>8YXQ~+J6EΘ6lJr&%r}@tȄ!Ќʐx\+[?K4Ԟpq<4$_g5-r7`#.I\$-GT`2b&*G-LJ0d-V>a=bCjڱ/x(@XG 9n:7~|ӏ%ZYcrlAu;65_?T8AJi QLU4מi89yG=wܫH"QY«1E[_z% %=Xݭ'cيwy*%!1Y>*+CKl8 tC O凋%g[pPX"|;kC ' [n ҩ^fztYVIWR-M9?EZH)&F GWx%YHZ0t")^ \cP#"WY h|H #zpI vuCurҫr`oO'߮}!݀ 2_' NA29oFLHI~ꟁ3fqSP8rhnT^]Zm,4n@e&^M+ҏvѸ|̦i]Cf8׃loHQņ D Y<ы󒸆M*"CEJeF9E1yI`ÏD/ },pUU()Xq+ƂxrzȞ|a'ޯ}<Q+>վC-7J\-;"qc"UR^ \ ei2좍&n(jI[OJŻ7}rhprDyq7j{\7COcFo",40^>wS"5{E_0eaG y2]Lǿ|8g.ODNOvlRן ` 3]]:,HLT$S2s3蘲t,&$J`2PYg ̏~NgՅ%'7YcysG/aJccvQ1V酖SU⏨}[P1Rc92 Q y kL 3O11g-UJ$DI JY:J+Й S"9cd:0t.3[968;梟{]Ks9+>oD@n&vvo;<-MӔZvMaKhP*u#"* L|_"i-ϯMiJZ'A2T!^x}no`Fm_o^Zʝ;dZՁـR 9 0 F(ѠAJAlbZ.79gBp,f|Y%Lkm0ZTj߶5rF_bx3fOmE9v6hѻ>ŒXՀlsDwq[~~^ OGІ{|+7=`C-?<+z'e8"ąT)VYbL;hISlxۋ=냱cgǺu-[xsܣ]]p{{#ǦjlX77Kxp>F_~ߒ{Scxl\]WkHW7_!­8?nᆲ۶_ O hƥEm\˼բ|c!6':Q(|o"32o[DsW琵QM_w_<|Z^ޮ힉Y 8[Zw7uD[wãeՁ1_s'-[U{n/`rM>Ny#kY&i=h"|sMp=ēN_݄5"zE3JXoGSYB%l`BQLPv=UuYUr`ٻIX4hNGRs>ۿ``-)1pqʀVk],S }-DoJ(m.*8>FT? rz*SvwnLƴ}qm+(3?fg5'6yB29湲~ze(694,V_LH 3)(ds; Ն,93<ɖl'|Ҍ)C$Yg n怵`<Xl⳨u1IIs^9&;0>-ufsk+9:ť#kDw*N , UVGWt4R l Z|Z rnJL)/p諸tTkNy]_4*[jrg+,:~6yQ#mtMJg2 8>t! HW7|^|qvi*hvj@dSCJA`LUī*UU9b@=z|EY"C|5YM\_KVi&U*Rze_0hfB~g׷=(CXJ$hIΕ`&Kq0Jq0HqH6f_t*VC[Z#%ϙX򠁳e'/>ZrzVK 8[5)\J&bd@,|!j l|ԮY}7q<)EZwy{u$;ri8_T^Hi{1$6+!WcH6{`.|W׷1rtJay ^66tuhpi!RmtW]vcnjMbaM#GwܿZxoٮ#ּ+#O tf/֝smiwGЏUt$rN2yR[)fW WFa 6!u5wMsDGEiH3eE)UT UTiPjd A+gvc:DL!(:>:SFř#&{JuO_Xk UYebU d1e*HD6lsEAu׷VPLvh@Tl%/Qkc 6GΊMÊZ֭ԇi -S[66{UjdPRmՑJ"EThMZ8HV TC("yIb7֑9U uD`])Q"ss(jKvLޖ8Gdv*laq-tm!ζiGZl<J[O!,W_7nU6fR0ZLMS,]^VH'2U㜷F`NtٮQ21jK̦q> up;E\5j*U%O-v7q[xw(TvqզVV{F{q ڨB'1k$*Wp 6h[Br ( Y!d[JNM%* .>.,MpMwa >N"lgԪ35hse W`TͱU~f-(j:}9=(< 7. puNk31DX\qMD#5z0\8-○":]\"JNVVvqV|L-6(&FaR&F9i_L`[0;GXvqkbS!v4{x1U0}Aps E?zvt~\o%]|\ &z~7) ;;FkaPʙ\n)wP*J 9p |UVн9 vyJ4yz<}px:em팦(5L?/$fS. UFwOߺ>`19 ~ј` b8 ( Ahlbj'ѳOY[S9+D\J<#ͺ"%x m}#/9=0HSlG=ֻg>X#]8ZvQeZn /UQ2xuq7_= 煠ʞG 2łdNb >̺G`zj0DA@8:hjN7 q3z3 ?/oz]?:>khq6I?F;?ḳ5H념Q+! z57mHe p 6Cu{0po } 0ҙ"e,_ E$EJkAșji FVSRro 徍bux=rr ޣ'qx ’^;gZbX)QHa29R& "vA?!BK}J"-H9؞WHړw8yG?yj=?n|.E5a O!6T9F Oف()$x!l`y$wvɱ6^-ReY_^|9cu5}\ncBiϩ@m.h2A>i9V:鉶6ƴMZm39XMYdJR?;Cq09ƥ͉`,$C(1L3#S3\ 3/3p=^$ WנK[-o]> a(TsfF08X=4ΰ`qH'\2I}$=&%Q{bW*9=jWsadv !^0pIJSIJOpe85'wL;lxy B5P l@) +8Μ6;v !Ra.Hp2gi'}/YK  j0ӯ^y#CR16{ey .&"! 
0G kq"z0pDr.$UbGok%/:~P(`^iyaJ&ƕT)JSU~-Op vZDhQ2-@qWm'!ܮX)T^9z2Vƺ,ڨ#jx>EdWS%T J'h)(hU'QїoZ1$p$εr2zC{$]^31 >ZD֐kHh+ż\kt1O`7Z,>[K+g `a?|U`[Uy4{9H(`vI烒8]wh\Kg4e{|7V;7͒l$F:+h9^eQ4PLMDGI\tŲ{髗`,/2?Nl5~tS!~l/;R3:|sg׼3MBޞ݅x9 /)2ݰT.\q2mf+Y0m4g@MVJi !kgq+Vb3w n\Q \qoj8(S \6oBk )hҿ/p]e.Q\$쁿BV,hhn醟l5кK~2r{/C~7sjžkgi:|G1ϾyVhg:_9u\8|i^/x;,Oёbtc#!X" i wAb" 8hIt`s F8P!RȺ++]00뭝q ycXdѼEc.P&=xݙ6޻ٵiy|o '˲}'(۞/nKbz^iOvW o/ѭ~ڃ$%^\h[_;xCǏ_fk3Z Z'j1ښebGj)m #Lc;RBoP>5 Ec4%MɧfiYJ&|ş9xxesYv|dm#BKTsKm|(;-պ~.⛙Kư5NG!z[>,As❰;fR5y3G.LG yA9)ء=cb]6 T&FRiEЁHOPD́9&9«h/q ݣkWW 4ί!vl-ַiڻ¥ތ/k&km>M1 i:;Zxn\ . 8#[ ElL 䴳FF `#VG#$%NтR /EJ[;4Zk_y1'9 0{T}R{ ;Z&Vrd=^H_mJ'66UJ잸M=M"o6:Gnҵ#G#ő7wwA&f鍵C> IJCznO<`x=^w}˵ll^^ P?/F TQ xSIQ=3hh(uyƧ  J$*q*(qxNd!zI8AOLb >)Ũ@dT K(ǀ`,遁`|.#s56}#M5kmH Ꮗ/{ n 6އ0iqM\߷zfHDۓiTUSmN&k{7Gxjo;gga/ 7pxW+^6s3̼ǜsBO0ghTI\q5(Ԑ7&H~65u<,b9AM˒:oܦΈ~3h8.S4tbIu}(IERwQ4lLǙmƌlnikpz[(ߣɝO=t{h{=(%wMUY7pr&qrMh7)vz]t卮&u{eTWut[{bbPt7ޢLW2R kt [BȖwCbuwn2{1C' vLț?rRb'wc\Sx0iGa>v %g38Ҝ7|N8z:;h.4?~jh3߿^x8Zg$o//awn3h+*Uh]E+d,|Ywz=#>;zǥCgӹ-UI3I mY0X8Tm4I ߟ~o6Nlxvd5g ~; ⽞Mf^0X0B>ċ^~WrIoo2KL^c6O衑v8=G9ck%\ ƱibDzm _!l`ſC]h t3ߐg5,kſbj?ZӼF!.)0JWPBzZ #kv1!y&@—7M`]hU^c'>th4lj `klB,cV<21_vr=c Gqx=RfځGf w* g[&[W+v1a?m)flވ$^oraMSv~m83QbK?t˿8g<>]\tU!Z*hV&ʍf4$,x*!I()P"' 55_^ix1c_9~p<\>0ҸD8%R.&rRkGxt.F¸dXzWe ]5#_=bKݬt'ô.Qw`W) OVr -G)ڸ[r/L%6sCF1ZMGv+hhY|N{ WXqnse7ɕ8hƎT ^ʗ@ƬRD5f:mA80F <(U@#T> /(ayᛋg ?M%<ݮ] NRtL10Q@5p<D0'#ZheʛhPWQYXp/_uqHS-LЌ.GxeWeC0Q}4fW߿.&˽bJV'Q ɫ*:#a:֯NsX 9$VT[IYDO2T czm:v\#V>$NVP1HL4EP~uPH`T<j[#TX;{:[M)^u-.ouPzZNXnP VlR\ i,BHG%j@2>9.h-D<1WM@)E/٭ĮfK%uñS9a-&'9aEny)iC9_`EKrgphM>/E{sٮ+jȏ$/,IM&N;  }BdQP~%qΆQjBHT/\4$Z7mkB2N  TiXݖV)aXJ2-= dȸcgXo.sίmu7*;MF LrZywPBut%<ĥT#lc4]x{iΞ pfKە3A%62ad1ta]Ma<]L:ںVVByd(M .KFj9UEڠgA٨?R@/քHdi/8FXģS-2;a/OJa<[J>E4E-&pR2$G!q,Ϩ(GiK0S֧7"opjT,e;* G3硰E,nXذ||J{Ť@( g.n)dr3M's&)㐂d*rQz.-cSŤP{`vhk3Nk(ػg~Fͱw9bR>~(,YJCPHZGw'c(Ο`4gb$5cr(~3A+Z){de&Pv4RSL1.5.hiJ#Aޮx]}zv h¶0]b뗷W  Ϸ{jO\i<%+lGF ǫτW3\+VJm 6YֿG?umu}/EɿhzML8Ca2~'Q8v }??[Eq~Qd7sE͙$9[8\Pի;+<8o? 
?&o_yKD袭 ^OCۡ{lմiY ׽6mgۯ eϋzc`yZ^4l"(q Q3<4˘͢觊l])~LхyŖ͠yN/o._ d QlUiaw|WbW0EYU*TN8S%&r8's*>}cớ|p5\\ .֊Nzp$k zGRSbkF J}7JJMr-lkhu:%* q J Qj'Z]!ԛ/ys{t7M~r9 wq0U .xqyѼ5!:l06NrmW߼ǓqG,u^|+Z*5^XB+ʨ H4!YPpBb_?S>ڍB~ڜfnUx!o<׻f }{=?C6OmSlGW7Ot5f~읳yι]'mzPx嵑a6lAy}d~{A8a2^krdpIDsˡ}tw$ilgɅ=7&\юa'/oP xH\w2V Em+'AQqz/A>Qǜt>YyT}Rj)@R齶ĈR,8\kCٶ Y)\h"&r(1AxABdiWmB)veNI%%X%zV50'#˞mi'm1>;y#űGGτftg^KU0((ayY*>!c <$Dd(E7$.gҨ Ef A 3FFA@J(2@V0΅HeSlP I@ƈ'?LIEx*޸;!m+tJ{9Jt-k} %L(E>}6F+hcnb\"=_BB/:QQ1IBXt{s -MvvGRZǫ&{ğ<_h3rEyS ĔBp ڽle?h 'ƀ6l7ǐ3[U}1gxN$A5]@M#aOԁSg=ާ/8 M,(QѮNj#h@e%)i<%em~<Ӓ$n1R;Q :/%6ʣކmYyIrVpb4AXr\R #w2U8@4𵐠מKΘ%򸒑)%x4SMp`!iU"#DEްS-55gqZYC6uU5k<>Edŭ*$dWEdX/_*Z5"IZ9,S& Go jN6I$P1gBL l"nsdZuΆu9ⴷ."7@C8AQlY,Rƫοs`6AGN&%N[5ZD4:yh \"9y; v¼sϦL'KPsBZA+|[=7ZA0eM-!+Ž4RLjSe PC"<„'LJQb,+3g7g/,, io,Yc,}A vPߧ٤8~%q%jbp$;pB5{B%v2&'gJvqbx7'sW'q`w#NA#y- FbJ.!B^wsRzD6ŀ(}dmɃ=Q\ۿIgiG\5'KSJhIT^2ֻg-OyGWxӞۘI63sٯQ{nIm<:zM{F% ^19WGuRϚX͊m2VgCe p[e7^8zsThTsMR'DJ"r,19X"&h8[Uޞ] E7u>Qx,limJ'pF)cEɨY0H^ZƯ I6:HX;6Oz1Pg4G?/HWA䱧y[mks>ĝkcܽIH5CO3Ϊ`"x6%w<-pEB- "ʂ; H9w5+YRKq؄uɒ %UY+ki-PvVTAZ2 7XE 42*(E8fx]> ?≐-%IUޝ-bY M[_pc iv0;%WJ,+_i i9zɠu[jPvk&()|=BuU{\Xkax\;J6+eOéֳXcB9_Q-(˕iA D̥hT:rQʲj=;KEpHޗY8ˋY-;oxZ{ϱHOc W5'i`b4qp -LŞP%t֬D0'QsRD+KYNʲ.P5Gu[-WCm罇UJ}Rʺ/{VuF_}Rt}F,u] @&붚apO3.BC8H0n LGx/hڏ6e$gR=ab~11ԓG.sVLyRlx# yUM//ckHߦηlfvoQb-,#I g%WSLH׷^J.$ $X-> d* jyd z6hǷ9dj$F\[3+Pg+pv}~ؽgp~S NY882O놣({R\׬{0sc]{ }Hpr" $&s7Ji]TiRj❰;n(c\[k,(8GԈ.@Jf#4yYq~.oiݭŇ v3/{\ܺZeGOΙly{{c &ḧч`pEAQD(uQ#'[IrhINPPFFC4R9XV[pD.it*Ř[d혃;9x-x2ҦwG_C^Q4=iљp>"tW*NN[Y62:L{"=rz$?p=Bwgv{a%]QJ=5؁,hR$錧93< G4H"`gͻЛC@7locpm"=g=A?-9ޏ:y2g{9W!G]圻ĸ81b ˬ䑒7~3Cz IQh51"i9͚_WUWWYE+f;k0f1 \sک 8p AX<(Pdfsrzt:$k B^YU61`Dk@8CAWKgˊ,~:Dnj5vЈdժs;'D@;1)$7B6>#@ E*dGx˳qNKC0y򃲌B'48< !@ldBvS6&Oog@INܿ9:Fn'FgW8+ɔPŸP=hʣb J_+uӢCg?01{E}{vSAX=^ċn j>/ 2}Nou7i wQ̼|3.wS˽(}]$JRwT]-k֧pq61>0Beݑ=`{{=A5], t>$>=շ~Huіj?]sEW]jvlIUzfbOuv[{bPv5WZe$%6ߦ[G vfΐu~ͮ9t{?_Xg1-gY)aҝyu=nBVjJ?̿Og?}"3dv4./ӵ_ߺG}q7|]Ygu?4I￝3&lz;M~zO$$&'8yf(8P WB&pLA ?ΦǓߟ/qKg?qqoooJ#A YI#/چ&Hϟ'?qPCuvԕF֯*'xm*>)|,x u},Vm_4E+4Nv䇯;ikMf7ث=钐Ɠ☄Kh]$Ir2KHOx) A+bfSt-[h_Cԋ\]|R͏6꺑z-k BRSbco/L0w6q1r9Lt4,\TH~=ʗ@bS_G>NSGnx k-,cFy*ŬN#,7vlᨍanIIV;h^xOQ CGfٺV jGc;јkjĬj%&{.hnoX߈&!CC/gݓɁ( ($vymPyfYVeS@2{ف] ,~y=[3g]~4.)3|VHO^}J sҏ ]5nƁ~7<ƾGpX5pp' c)a=Z=C?>_Tۂ[HϡHK &!CtR+eX銇,$mĘHs&k.h2PRXkq,Ow>ݽ&}C~ѽ<*6n_f=V{"S8WuۭfǺY&9z{nW7䖆W͐7JLbJ}>ΗcGgsҏߕezg|}qrF%nԖOggm&DY*&Z%+]ݓsiw^7ݫZۈ>" L|yZtGvuk{ln^kjmDе/+oO=mOW?}{Ӊ|FjIZ7cVHiEQZE_j/ׇۨCSr))a)E | { xfđ@0J=ٳYCo>tbvOڵPnY*/.v'5ulb_VMX&wY6Sevvޟ|qVY+ݙ_vɭ5d wm.~o'6!t{: = 6oe|d[1rQ#g?K -+^s\c^ !, [3%s`HjGx{#j(GUDrAk#Kp䤵WwZ\Fͼ߭'LيwCA$2x.dtGt(|eF9tJRT7{=܀}׸??Xls8ܼN,'z;me?xH[ *Ц3ګOdҪ ?s-Z|yTyp AI+O^+븨FJm>fFȡy[H~v# ^=n yD+MR{pLŲamMK6IEP.JY ,Yn ,X(kWBk!D`9d uzfPI.LCvgo쑤u_(}x5Si'4 2t^_$ݷrj\$& 9zPSN)'s`)%I5EQX0L!Rkp), 4k(3, !K*Q[zbߛ癴&Ȩ)"BQedS:5(n|X*r @xo.M!cn!A. F/|X# o6>.7\&Ģ sٹly.;hX\W`TvH:f^1"K @L")a p,iVH?eSrԱ29Tֵ=em`3{0ɵ~:bX~?v?mq2;:x:8?tc=*gW[C+R1瀊=#%< \vx.pUfpULhR_ ]v^烴&& \5ïJ7W g^jzrSOsS*t+ҙx٭*zv˹`o,˭66Y|ˢIM`#Y^*Gw'ŸŴdsa.Y=LIqI;ފhYe8JE0$%'a%h;yyI;) dg `Nh,Cm)M^'% D.tܯ<51-(sZtj\s9Zp֑L 'Y o^-7/mt\T >j5tE,=h҈HhF{e Wezf1jZYEL& P.dJiuq4UR c`9Ǡ9Oi'1mghi;ˈF{ZD@M1ɄAd2:t"ӈMt I`Ҏԯ"R"3yDμ2#a&  Ws*u,{W.# ZBI~;LgrB.}5Y\f%9*ݡf"h./g&o'h]"?K&_P >Ti@Vb( CcgKUۢl@. d.?o|qף!}#'yR};@zXM%pIџOOc![|]moH+};vWp7;sX`vp ~%E$(YEYV(NĎH5ͪzOU4} awSœqֿ'e4X =" 3Q/9gSpz4o+>T{s?]9~?G=GW'Nmؽsk1ﶅ$3rfyD~^ grv$>.llY:~ z+h;7ՒlD{{ip!l'⨌3wT#[,[nsauJ|8˲~Y[y;iZn( `k}_.t̯21˗~uMxUNPdQP>ܠQ\B(Q%gf9RK)]9_]ov?&f7ֻB{ثb )Sjэ~{MxFO`~Λ'AA%H6QJ4h4 tƠ! (b77/o4R5ohhڔ)Irlh4;^x-H(S$Ŝumy&ҺMEh+Z7qAX.nU 坳F[tS> }-I6H/TٲL 뙱1$SI6ē\xsh})*6>"ZbB[w\$FS:]jϲBo|t. 
{.)*f^tgEz<*>GM0tѹ,B`‰Ee/ s7On ~*'Bf2lRY_L㏟Nh8Sǂiϩǔ)h 0Nej$FYD[y2ӣ.K~n mc7d7R6sm|FgZR>jCrvqiAhs(*% PGJ &:4!ǻ-+KE<.vnw9y)Uko] !Ux1N`c+(Z Vt۠^oȿ^u7ґ;rkJ|l wWƦJ;\*øxEyF[.gdيַɳ]q˙Je.s\:ܦMPD*3L yGZwدPurw JXiP*r Ry#6һST2e䕦rN腧kȠBq>nI?F TQ ʁ#TReg({L.9YX,0X+O(q<ܶ,otI8IL:D/gSWʦR6#SSv:0{v*8ϫ5<>( $ !Hv[1۠TF@LjW)z1fb4cyn0ѽ <0/l9^;^G cid)'B: W)qD;&%;(Si}`MJZn5:{ф㐝&$id7pOOI6/5aijW'=߸$Ѐ0$)KCL'@ B"83!'Dיcz 㳰1eLD(X&1h' $JMD`)p6E;@^OH`MU[L> y-ܛ_ǡɒ2[;|~1:ktJoP7!iݐ\;R#ՍhAi3OHfO9mn]Br eKk!䫮!vC[aSσi$!>[Ʈ{u{]MOeN8dRRKv[un Ejn\}כT̤G)O,# @zg7_rpy \x?r俢N^o~0.ݗ8۹3tc|o]A0LNvu@> ng~mTy=Y͂D !̏ϫ8wGϗC.nn?R J(8PWvDDp aiv]k:3>kz͡_Y_mJ~TgT683-U>kyqI ~E}S mvQqU%Ax l<<?o [?0CouU|7|Y+c\<7?z7LKO{mzyZ-a)I p_ (I}N'( B)6ucߛ=+^L֯4m(jR >_eȎS !Kş0@H??(t״ڕWx%1;j/ԗP/8 (fв])U7ɋe߳0]x Ur=cόُfyt){QO<%4߅0Y{UpV#fh6[C~se `9ēeZ8Ox?6l>x9$] EʣYM!$G- 9C $xň =.tY˨ۣ6ķh3'FG1 (Ks1q-3*6F.gNϝV;D!Ran}`H {M@-rJA0by&+VWu:N=7%6aovWC׸(6bIxA*R"RĿ9g@(K6 i c1P' ӄHtH,U*^lXAS|Swehq0yjʈ7Bt(J7%Jx"d,PDjg0ߴҴ]CW,#eIj4*9AЁ@G Qu_#_[ /JdUAMcr$f6A!:Ct[ t 7 С0p \R1G8SZB:hႵpLT4p#B9DiDϥI f& 6 H4mH 5uMJV3H 1jMx\NG4-l ntЖ'e!")@2xJALmkmcIeO QI]d/],Ƣb%)A{gHŇDc<ϚSէrR1e<BJZ&с f ArB:! `ΐU6lJf&+`,h \r]#&|MC*fB]\j,*Mvsr-7B,!)2C\v.[v$tZR<`i1VQ#]gmR I&% BK9Omdc؉C^3 >JS8PntpSgQZP-WqkhۋA]Ԉ R6q-f[޶Y F^'3l4{ Y!<.>}7*!5~wic-pIFkR"* +Iz!&9s, 'PZk4ԍ[ rJв*r{_>5߮G>5Ibs>9]W&bV 4ݻ;Gޟ7u}7?'7 KExt7jܷ} T҉ߌ'ˎ6IO1盲 f|{|ւ\fUeƻ7wn~zނgE{E崣PIRƮӆrëOerOJM?_gjK:q2(AӄLJ?iz`}'my0Lv1kR`*ɠV--ZG@2 .#9!:Rs.{FeZb*!y\0e4WWC̒eo0٧Djx`}}쨿@f˂TPaY4V YYtzc :1K&Huck XىcF@!a6R~-&' < -$Jc+kLEPk7dVY*K,^ sQ0 lJy 6L36 !!elb8biǡVTz݊SL"1mC&RtK>~# r /:ȤĂIP(kFc9I;WVg=ebOҏC-l`M㕒9^) cn6$ ϤNTTjB "aR]Θ@qBf9 D.vG $2hD $PVك}dg̥SuVӒ"Vl]ܦQx.sqG 0sqKQ d #D+gXvqaOVӎC!T0{xV1E,Wiےg~Ԃ)U=Jh&:&V_}BG)V:[08eQ k@/)mOð}]p~zVXv|;(L߬}?lҩ^a;z `zVu3g«FZGx\?Тt6\J0:SJ23l}2P, LZr1Gp;UGJRƍ_V!Fn`K)e8W!9˂i-"&$7Fx 0ĺ;z8灁2wgeiaqW.W|j 3Țlsw n09a 879\c 5KWmzNbj_e0OAwyBZ8Ldg&#jÅ2|YGz56$/X|'d61h4:{g0:. ]Jf0*{s26YPG0DDBw,)E~8Z,Q-=5Oׁ̯NGj}w|  &G:+EpsnnnKʳM<9uil}s=fp8oPx;.Wom.8|2;}r_j74)Kn|Luʼ:LfnV5Pcʿ8ҥu+҉ԅ.mH_\G%`\w͡Gk9(V(%c \0+ t٧ gb =~ؒpu>q :ۂwmvpaɅT}*ޗ1Bv:0譌ډ-aZ#*r9ӎ&$}'M>P;f>tjk{P;% e}@J gNƲ -[%J;K.OnS5w)q!X@zcxgQ1 7OhY]18{ +kzBmnjk;+\C4]lN,O.w]US}j0&gn5M:Gxd  H:FFYg \(7>̙@ݤ4tat=+e2m'hk(smȍ. X` $A^b: u\[ ][ͫ ]EԹo$ׄ}oX d5Jr<+Qng  `o;|5p]I_x{x8^M.i*҈7͇ Zb\.ȬyʽrO3RrէM|tq O8uO'|D}9}Xz\‰%rckJ9MY"]P.*hvBsjy*x67*s hL}gfgCdf/8o5pQLS~)4O$>9EPcH+I #/v~?î&Yc6kS$CR V %ҐՔF%=5=UէO)Ҳ"aJ#3s;:a;? 
C]mSN[0K5iatHpmW0wRIa4/O/ㇱ`Fo\X4zbʩ|o[V~χ;\U+xߖ#,4xu׀d;X5%}V6m.giוCxL@eA @~QH5L䙩e2=uL6,i7 q_3:0|z-גmy!h!dlnp/F 8yb Io[ 6eh` E# bGIۣGA pSq#&?nf!UdV+İR8"(*ZɈ(P'JAF#(-6gYl۸(N@hgyVd'ur+1a%'B;]k2 |'` bgTk 5ĕH֗I!]WXk`$Hͬ i_&9r "?TPJmq(HL &BĘs<C;iEO~66Qjs=C#JH+(\[R7rJVTF/gaԛu u[vZO^4<<5`I?6d߭v`/?6NfD[Vm #Dj~{' *rT,QqULf ɴB0,-2*Ǽ.#r46 QB״\:=۝^i%K~CŐYlaPM$҃Y"`'QX_j7')xk#yduw Yu"roR\f5A ZHR!,JqbDNJ`Dʙuջ5k1LY-]o&!|ΖfrtjRƫ-]D=.Acǯחa|w2=TOT*:Ӹ30: y ywZ/MӚWuDF?)-L>Y#ip*6?{INXJꑝ M*FT.+%F ʫfG`{RƓe/WJI Km<Ly̓&yo:nJ#-ٵJ/MpR h@7}I{2^U=取4kyM=ԩ,e.uC*3>u鼛"PoqS]bg㱖q]Nabdun-R}Mq1Q_)= "kg;6HTa.wm.Ͷztϣ& ,FT T_]ˆl oc fYEZ 7Z7,]_Z!NBX]EԒt* sSeb"(b$EBaLtKYv`g-K5//4SEWx]."l Bۈ(F"sP-6ă4X"T xZln6سl2S36n9o(:|G ~0Ҝ\l< ⣢-Z(nq]4)np0sH[]D‰ux8_Vۊgy@m`k.P(kbh՞6#HbaJ;<'=!_GpտGiEA^efmzpP}1GaGiD煏D,0\XxR?pZ(kOíO-᦭vz42TqjZS$&X{RKDzaLG eZ30$&Z iȼMك*ܦ }WFUYnTiʞ\mXgְ+ҾC[2ΖgZ-]׳]'O;5ք5!7V8Km0 b_ۜHHWzv3i[$[vM{盋s$-z^r>$*ۓjA!Bڮ+Yw &gaSbj.q7Kwt&|2aS%ROS;pln|2a9AT3B T=$.Io @PBWsֶ.#ފ}/YG:u݃_]Ppe8_&qqBE3pV"N"R!$ۈ3\)ܞ0`xIt %^Fґ0BBDN;8=RM;*זFi I e$\ P, :p0574pDȀ 1( ('8&.΀{ ~M ]cl ͥzD##vq Aw[!J9OuTPE!Ϩ&Td K,© _*ї7 #$$8pFBb#1S, A9I0sYfMd3lԦlZ_Fs/nxO,1h^q=E%&I齥,zAd.;ʕ-fΰbiǾe{Z9 ˘,_T$.ُT`|7r,fxaUN6USl,>^TkZj?TY0 n<- ET jڠ}/)} {S]M3pa(я|C>svC4֜e20<Ӂ)Xأe̖fY1"ۗex |XV{]y gĄh_ h9]+]@ !'&ol%hΫR@W/xM&=JלhIw&pPW/$ >m`rp-M27?2z9tcrOt5'\r{sDg2NWaEҕ&׻/t5zt5QIW8sW7V_$/My=WGpchC /f#'$ÿ_?:;piCo`p.F67GνbJO.ctmC܏VN~T6eȇU,?ߎN?Zc1??=?^@8ߺվU2><2 6JSWttء'ej~˴7t5u(Etʛi'u5kp;|Zst5QA]D )q{DW. ]MdBW-]2]@=+&&\ BWmpNWH?7+I3w5 ]mp;?>Qҕ:iT$hSuʛz9teNnFۄrE 6!<)M<]'\ٛG ѮD@/%ÿ֢7O`{ziv\=}s"#3W\;hOKoqA41m3֘UCܴlRdS:sړaW׃ R6w&,=yx.J.Oyr Wf9Dox/P%RޫpCW|:QC"3 %ZIo 6O+\S㓓b0}.^+`s?708C~bO*~+BQ铫n5.XrC%|rrZE.'7jYw?@"sz~Ff3!vmӮ/ Kt~tr=}wS0_[ٍZKIU'.2fmk>К+LjJ0U!orT (oO#ewp@P}eysF; #-y#gɿ׏k\RϞmX!x)ZhYB$N%DQFLR\fm}Z|KHs ؇\Zghsz9Ϫw?~u~s#Su.ш-qQY]IYoj!rE(; LcjL1\ɗ\RQBR!b/KiGxk1ƎlI|@#ܗ;q4nX[c5B}Ta,( i!>k qM{Y1J7R(b01whFK:(FyBNRѢk-\O߼yjɖu#rL t҉swni{ntΌaG5Cc1oB~V蘸ruҨ%gi=M0fN"dj xfUaE#hC.&`gO ; h4+cUrt44F9Ju~﬏if8OpG;Ј̹G|Kxwkr%_%FRu*L%KR8~yje=t\@ӫ*ep-܇xua܈$7USyz/ɇ5 nN'"NgIj GXGQ1 6!Ib`5Hw-B_2,$U.l%u/I"%x,U{6]lTчp;4R%91"s[ ],9!#::X{|h.@N`]V˂OhBZքcb0`Y bsB6X"uIj{UQRA}<䌂y[InU}gHH/Q!4P]Ù4`3dl1 D! 䭫"NJaX`4yY ϴ0z6/.^b]Ҝ143(rvp'y %`ۀL'/fNknL/rkU0Bl^W n.0P#ha O{T^Z, *}plsV2[H\؆Bmf 2N:!屈`tB,*Xv##=dHiDH2# /Cx3#]Zˊ1Z|OGbLP4+ jC0Pg#VH܁m "U,TG#?{u vdVF^+Hlҗa|g''x}~g,j\_}s6C@FG˰xuIK6PPȋU$ w!}i*60~0d DiHʠvo VR nkҜ2v 5;7cY +)dUh$jF+tuv! ۦ5UִV4oKst՞EwiSd<<7 Aj*x?{ܸKܙ鹲ʕJ}؇!,rowO7HIXI"0B~jI?=4r/E8Xe5ݭ0$,̾0>_.QCJC6m2p+7.i).5|nk)3 Q u㰋QY43EE[WLg(ߎRڙ(rYul[BѻEy&hec e~ao#5LRzZ oJL1'`C0egnC>6 &z))Wd |RaԃtPS`cBq*BOiSHya15ԣdma %./P+>W>De%ʅԘ'W CM4#E^ %`saP(EiQ9F@J̣( ^87sYjbع@= J mDR8Qy1,@wn.H@̱z}aT*)E|ᥪId0arEbGܶr?9 $Pm>^"y܆dU0'""U~=6ivm?eܜon.~[MeƺMR0>TIC},OYs-Wcb'ޜ@(WGuߺTzN g'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N vhN  tLN CS8Pn8 wJNI:,`N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b't@2x@(N@ֺ;Pez ; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@uy$ D!'U.'jN Ti;^ȵt!:; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@/ ԩ>: ;q`R7v#:5}>>R7J˸:z "U}ChpErWT*a"J1&+5."\} !cf"0&+PʨA!\M2Tǎ+Rq5\i*"\`U"Ă+RKT:˸ |DBa=º HscǕ!/OWBL EW( HRc^j"XP$.>~J`\MW^XPOFmo;CM0VCrJnQFY\]mzWRabn/j.zؓu,՗z^zqYΰ%͵$C'*~۪klzuQmZ/ mBݾ;Lf*~n^KfeA~d%iW| yӨqK}gfQi@)^!M҅"V1׺SM<{\7M׼S; YAGMY=O>l˱ua!RS\l( ;| # Q$WXBRFoB ZrPbBrLh N:&wh\uS0FV&;3z3>"\CuS;~\J$q xPnrW֫T˸ bJU4"&\Z;T:+]P:\\b bBflerXTLɠR^Qn4kW֏>"3&+:Jĩp]x֮hw_Ǝ+R5jr* ɕ>\ZT6\ ׌F¦=WI:Wz:NrWqq\*5t%Ev5Ӯ/iʇjsMK Eeѡz%[oTYyPt2 ^e JgAJ_]x39iYOMWҤ ( c#c^!%%CmMe0N0 N *8%d<ջ$WEZ=7 p}Uh9a&Q#xI0_vk UvRȢWquKr; H 1z\J Jy BD+||_"Nǂ+Rq*5j#o" $$7XpjciJ{AG+qrtԚ' qe"•IIo5HՌ . 
)BVhpErU4kW8Wqt5E\ {uDW6 U)WĕRp2fm;~oTv3N 3VBS7jO4ʮQȣ<Z(aYj$&`2R h7R̄`$X 9pUoG S&QȖhgW āw2"\`=6Ltpj=Pe j:R8" v Hn z cT @]ulC4"> jǎ+Rɸ$ `uDBZ]\bգOI (" v*\\/c BWRqe" D++C,"Ǝ+RKĕSBLkW$;R+D4 F]JWSUvuspYߊՇ%M%=N^KU|ܬf[a#vNJ=Y?mdmXrտ<_jXWث6' w.]]OcgԿO{ h<6yRwUNjrO]]rOAۏ͓UUI5ك8 ȁڼܩilkwnݍv ۬ݑ %w ɀUO$i*ά W] pPID(WhHsow|J* N1B}rtGulAcïq!Fné$;~#<v,3!v௓#\#.VN*k_);C^JCDBvҺNrI%# V0&+Zn/&Xpjcq5A\Uń+㉮HwTqE*=GWSĕVAXPT6\\ H}tE*]`\MW'(<^Z'MxSu'.B:|2Ӫ,2Qy?UwWt~?ޯ֛l>biˮkZgUjΤA*͚V.9;f8TTgW*jYft^Qh޶[6UJ<͊\\\=5xP)[UjkwܭflcGG};yga')W?>d-ƅ_X"o~RSe*WNMnDvS{G(k{@[@{Hg|xFTV>^|p)Pn\>+4oԇڣB#[\_f;د[?E1B5{jڤE6+or{쵡L{j֥EչgriέE6 7Y*iyvϯަoM(m>LT'u_mV}?$-e͓Su,i%F7K]K+UrMZ4ۋK`Dvfϫlm+?z='VnM,{دo9(3_hjOC91w紹DyuaԽuRV&X0&cS|=fJyHu<6쮌 130dv'$s{̥iz?`8Qr|uؾ~#-#G;ُK8\⍺:t礮᷸d.u\UVSWWSQWo(~Y{1,9uٕ\USWWȥ֨P]1Jcg2|+9ue'2K67QW9't z>ڇ[I;%0ѨP] j92+dWq[N^]%7+TW6焮᷸^.ug fn w^-ІD|4<_@KIS'kᤌ6yI˧{^PcðUG `.%!CL̒QzH% )Jj Yzp%/2(]+oqT+my3 O 5'(qBhǽJxIxZT457}b6[=)ESaIINr|N)?P>kV(vfZT8 C5]~brZ~ A}aSগ5l-|9nz.mS lIt{PGZ8?oz&wI--[ޜy_1;uir/(qv;! 5N=.e^|޼"K(E"Ј(Πr|q\wstA{]98F$lXxQqۤT^Y*d7x]ІHBb.*N_$h=I`"NUfZ{dvqo?K"X!->ǹ;AQ=*bzC撙mjӛ-ÃGlFR )׷u{{YĮwvr.+4,r6cl3.{W~n#p(;NA빛I܏jG|ĞV(W`);8]z:~0b[㈵~c>)j =2ۚLo?aXw{N:",DLf*s;$t5JT]@U_+po&RfB8pn㸗֯*af>NZh+UKVnׅ76aLC|DKs/@GJtBG$OY>K?W5@"D{#4+%pт$GbHH D QEom>k#Ҩg:hJ@1Rk-8P~e@jDʘIBM O[ O\iuNTIEX5Cy=Bahc6(Q2G$#-zcU ; Zos$c-݁V7\v8A@/@@l&ժh~ Bu3 L ⫋eVk:pM- k: tlj$׽cYʫjkD*b?6 5rLޡEy ;v5vr@֡?û/!-!bV K!{ ?‡w2>ԶżȰ~ǵa8 VzT.3kЮĚii#vz֍Viq;l$8C]`t;++W4"4`Y; e RpFZơJ@@ehH@&P<1C*&ơLPQ`T Km"ng4YnMXyܮmqIkx;B9OkZCF%Y{}vJLJ [5-N&8 p8zJG\3yk3;Y5Z"ۧX ,~bWܛ5o߀Kٯu7aPvA=3c'w.e ^F%ˉ{_8Ni2EBou^̻_,r~ǮO[?\uڱxグW*aYsGacRĵLNFzH'%5tO -h1oI,⭑`Lǿ}΍0 RjCdZjegydʒ:zRR$|z+Io(S|4HP;i {:pSJ^x}79:8au*Us;xf4=~9^Ա %gs8BhM,(8 (iEЁHO3=lS]_v9 Sg}M٥^K9!hT6R uTx0͌Nlr1μ xCr^82?ݖ)]  s$9 Q]~6gnu~\6(%27٤qgb*/gX($'\2I}$rK5N{ ^1ׁl5f0;h=.Fs:.I QpXL>i;&by.18ޜ-"gBSсemF9sx1dDHQ]PR0$jcgBb:wb|IA7e޸lwE$&q+R%ϕ:DEdš*E"XǺiƀEӆn*G*L_B+E&VѣyS‚4Xu3ȫ9r'])#jS.P9>=i}Y>~^JYT:*rMh^AzߚQn.*ՉɃ'wׯ^ZB"LAW /bm2^}nǾ)fGhuG$$:N(OٕL>HCǎ3QDL'ˋ`mhN1>,#[D5,V),; l1iP[>V Q$+Uy)_>Er:*G Ww͞VE60 (~dx~my WRZޣ,+;W,/K0uZmؚͧ-U=)C:]|k56g+p[{7MЯ9}$HDlTdKJf"1F3o(47E5{>Yw`Y*3ppN+ NFMF'8uDuIk|eH)>5Ze܌^(uj,/=IU=cgʣ:4|6f+]5:'Bq?),?ix;(`||5FjRC> u&V[R\MTf+lf']vax6yX:˭g=Zzc-}BR$9h $2jkEȯ2JC1sۏ>/=T2򶕍ԧ}mѳ9},;TK+"Zcb#,`nO:(+=@{fxFxe.P Q _bV"N(qLήSnLl O='uY/]tF(Α5zҫMȂT++LMUu$ZUi" rzՉ+w w0;yeV%|q)utuJEe h;QNeY%I7PjI)l7DНi~˰hGHfv/=BR42WإM^TءlAU}F8$UfS&Rh Mc|p|_mo2FUb6{/bPl>C0f`NJ>J}-b󾖧=\9^wy qw0䓯e*?FKlC.䣨&C?w}t>.T>^rF MAy.|J,/{Wmʖ>gD4TWw;!w«ĘZ2\ҲOcfv/Zarff13Ӎ~^m뾎Cmna~v;]JP~B?Yz5MZBьUp;2ؾ*qg tU}po8~s,w*U3>,`v]H勶(gh%V ŸtLT/wJ9~y#/j%1Y*~KRWoO~:ӿlD_&zz/{1^W]r9ƛݟxߌ=eכaꑑ:"K7g4y^ Eh%kfҩSӛXMKI+xSlZ7 nË~,gf)iFx%>m\ ~1Aɵu~<1 fzb,eUz[a@ n7J<$vUyM$~OƎY^W'8~Xf~v~oy0sxwdƘzmջGc޽xt9.~aÁ#Dgem؆4^{zz)8bq4E?Dɞ[n7S7>!.B!uR֡疰]luqUQ:(x^8\%eYG9F2KB(-t_ev1TSRM+[T{g*$) \g&9ӊ)CNZ##OK5]WVd ix9t!G)v>{wŇ"C9%d>;HvӗyM'Wv.T\ &<^82cA*uBNFxE xԃ6F.]iCOxi),nc}B1bh@4%aU!mTZ`8H>!im,W1gukkwIdUpl&| 0'ǍB-)UUjq㫋뼉vouկg=\}-~E,ɳ(x` =Γ WR)Z}H;쒔4Y'w|l䄒d$gc"U2PFmhSm3!GT(mR ecTQd@epFB#|Oqe5 \rU4e7>8*C)2B£0%s3b>Ee)"7%TS1|:Vh-\q G2R LZ"B+yVH? ʲitlB:vBɄ*{ںÁ{?VZ8@(-w^<-v-u7o*(۽?59ԼKr+o,FޤoDw7N7Wz]` `Jm)s}s5_ n0{ݷ02gތ1Y5>y98xqsgWNj _qsNO_?rxPYΛTZZc8ȉ8ɕ8 8I8#`IgDeihTh6* ʸ՛\JZe =rRyo9@"3dN0w;+U^/tj1)Es~}/ݰ0tw>Cmr?MwncLq1 ?u??o|;y UH]]xH ?nX1l|c]lׇx75_#]}>*-s~ە{莁$۲#G־-\~nnn0/LחVdz֡)YZ(BF!웻8_ ŢZVpٳTȪ]ט \g+\bPb˞2lFGh@¹NɈkkeB̒v7)5:=0Ӿ>v4BqY4(,h`9ÜEQZRFâCQ9UtٔL`i֍%`j5i@!R~+&' <) %k7a٭q)pT'rȉpT }zj^*2,Fy@ (*QlsIq>8h"ԭhr{Vg5"$޷v Pѥ93)j\dj[jQM[ TfơPWlyW뛂7N^|\sOb Ŕ'i!" 
cVtg2,6JlY2]Ŗt{P= ̦&!a1Y au)`Ů&n/qŜծ6;Xjf݊SL"1)M4Q{ ᲌.q'a"٤|BK>V9u{\18LȐyA&C&&HCiYC68,SX8a/qa 5?E"6ժ9BGhd93錓^E$X\-rƴmֆӝ@bg|!餹 C `H<Vm?K=2siJj@*+bf|˜dvܫhI(&c.n11z 9ja dT.vq*8Be{(G0aӻm'- e?>R#3_Jhx{ Ltҿ:s+;z%e)P &mMgZz86Jc;ꉗ(N.'vE=Er?D(!E[QUyIA֓Nȱl;<*1YREg}+LN[%Xе GCbT4WZ4F"|4FWJkHWJ#_i+|4FWn+|4fJ#_i+|FWJ#_i+||5|;6FWJ#_i+|L`|4FWJh+|)K#_i+|4FWJ }ERnndJ:qA+; Y'd A#p:5-/_| YED0De8ЧM$%%ɤΑD 53飪WuxAʛ8&҅z)B!TzzfɒbIT!1'LvTK ֢1.1"3e),#hGv<ڋ8HX\\r֦h0r2[#FmLɎvv܆n:w+3U:si;`Rt_PK1m<B;sm)zL>W{$ xmpR~b &y3A$`_PuH0/:d.W/:d^Rˮ:WXKnQSoZM>}-l)K |m25`Q{]j֙׌A[p2i+RL*G:euk1k 1$OV53gЪP!F)CA<`ւKtJ̃ 1\h[gk;7l$'K0_6+)t^晿]xt6lTpoW[~ֻ||&\J[)#p029n0rL.TE+ շ\eޅ=-Fl)ut:aTFK(y|ۙcb_p:OZ<4Iɑ9jA1$D=zF ymlH*'1tZc~;\WC_N~EDi;Qց:$R*29 RG;kP$t}IBZrHvҴ.&ŧF^ |򶶑]&NdNz|t O:/*/]KGlF&I;Wu(}YsaX R(:HpSnwMjʞoʝ+5pGc5[DuޟWf_Cz'ޅz<`{e˓{#a?/m@dܗ2}(L@ x%3&WJY\e(yrF$0s٭Uי6$(V-|^0uAK׻_|)^agmq{ʁCQ\)Ya:%o$/w_]"Ү L*=R #˜v?Je筈払Vj*̀!1Z/P Rm6p$Y)HyD,-"o$ z!S HخWk%M,&OZ=c0:߲,NiӹS"c4 )6ss#dqqYIPo|HM7YoW,Vk-E % :Ƣ 8"9b,Q|Ĵ+?γyMQw3ކi]23Tg*9RItF1hDYȿKfk'*b !ett") 2]FO-3Ȃ"H(s9: Dpw"1҈ea&A2P$bs-.孫9וeA8 VPHXTL?KWAa1&Yvx6AbӥzBA*P80G}&b( om冱:IUE@fu}^(߽f$ǣU T} ![>ִ%jџqn!,yP9X_Ҭ7q)I"]%/mޅP@ӱ Mmoe<CcO!}3_}k>DnH|A}7fa\iW)VJXuڸyz%4U")iiWnޘiE%žN{?76~/ lM8%!-KޥREIZSCH R:,r" g Q:X-O#\Ccykf_/H޵T<L /~ЄeIZ^ `$T/}4iw:;.аݘH)GA- ܓuY>~~V 51>_׫suLRT-,͌m;r]p~\ .3T&Wd{ V,b3`%Qh\?}7E&mx@?2~UmWo /"w4h:CXv RY1 hsQ:F;ώ[RkP"4ͫI4[~u }@ǭBۺJ 7LFRL?Y%"j_WFZiwiw1[ ܡD?T<О Mۣq"jH:['p(4i#rNXq_N/JnMve[i~+||)z١Mr.[1 &Qq`)&%%ܫĝQ29sl% u-swҸ4s?'S,>Meݯ:}]YHLF 3-TJTֳxFX", :k,V˒,/dK= ka Wwu>_aה ;ڲv $uS2kÖ2k]Y+G8rK]̚Ph#օch]E||٥`R3ǡN\KuXs"x7]]-QN/,p.s Q@!a+*9iUN6=~vr<-:b*7l 3:Ai_OXёg-G>5`]';TY ݰ`Afo*Ye: 8G>MN\%1U̵ufވ>F-*zuWWaTu$v^žf~Խq~-ܩ;=Hz^~c!^d57`cqJI¨ ]9ɤAzRBiB2]7?lJ~{GpJ8-^'qhDOOK~%Lrg,kr@72Q;D! N"ó)!p|AO\"RbKnxKxbĘsBBD#m1thJā2NiG&؉<_.s'=((tBS 1 !Jeg#+<S ]qt # g/=QIq< -W)?=I:g$$b UDT5S(vf -mG:u'PԶf7w,N⅁}ݨpf_7,Ha,21esId#9)nDfR2d}rNc3o\~?:!m00Io8tl|;M|89H#HQTH҂5 %EI)¢_4 /&俽Fs  3&ZH Ơ`"b ڻ1ә(gUYC1zUcy7Lh38&%Hf. qmՔ+Ϸ^<0;sz\ϝs=wJuNzS;yj'OfSPJ9(cL6%9Dm&aΰ4ǝLO~~n6l6Jij`uVŢ( b$8P$X!I#5]I ^ $96ѸT<rR \)u@(lZ[f췌J3[L3-n OpX񖬩Ћϋj//7n1hųu"6RH,NB[hm*Ϧ4ؚ< I MX,6ںޮOo',%"8-6]^ s*VvkvjwHUe0Ϭh bH¸`2h>ه ޛmEF{Ȍ %uvlkRѪ0Zl@TLB6oG~\ dpl+8"w-.Vɐ&Ę/%0B Z%^pOQ ao huT;C"BJ33Rc$DE[f췈#og uҔbʹHh3=b4>)d]$dG$YI^͊'s&JׅP #%Y:de0dt BȜJr<'F!%,RNRJXMt .@k&N ϵf,ә,v+oRLmQ&)kxᨑ K)'%Z!IDal,,֬ϻ{D>ۤvզدuJtK41s u} gȠsuy)ȑ[~m-{Nv~$wH=a$遹g YVdeKF眂^[X)_Wh[?3>_Fqrjy~^eF96γq#M,?ի7ޫ߭B]|?"rm"{h.^~raݫH򹛵ou +kw~\?v\Zg˛YiY?`gg˄bi|fO%yhVk3CQg<{GzOpr~_*H >(=dIvhx~ZP%~–J d lo|`}IR*cB @*)W֡@(vl!v˘()g;FFN֨&dc8]\]ũ~ ,b QFae*Q"E9鬒FK/!H)WP:F+Y-b ]d7Poe^Q$"&fLaP>prkB_FX;4;((ˤ @^JūB1IB@Ӓy9DmC鬒Fj!sB`?_XueTDv.:uϱ ͱoaVڵxm{Lld&/{qy_VU(ޥQMYB ?!?ej"=lcG7W[{sNjqu[Û_*_o+oZb俬, blK bPD,j,br,jB,j,``NZD;V)5MȹR@[R賷Ѹd tX)@y|dAPo]NaقRNAc孼 >:|& Z`ڒ_fqSN!>r}Ѣs)iPlfPEƄ;<4pT LxsScMqs q_h'¾/~\/COڱ 4FPRIRנlu/;uC(u޶Rۡ41Դ SW{nYK .~ .ڶ%k#+9@QZWLJxڛ@Rɀ6S =~ؒpu]:m1ppߨ]0tޞ8Α5_[)82 Ā pI"!faS!]oGWy~?]Ǜ g~J\Q$MRr V _iQP" {GmYn>ecFBqS#1V3Ky.ž#5sm+B~FxzaYVF4ܩRۄPƍ(R' 1 n`%` dH[mַXl;HȸHitL*pSާ̰4N!ʓH&n?b *uq8 PG 6haKE1ab^`w1T\/;w"2? 
F9(ۀ"cJZDt"8|9!2.F#DCһ`8F\gAjW>H"CNJ8NHY@2lRD@3Ld ܛH׷'o؞ͧ # KHb oQ wS8qn8$EbN)ȳ$0ưŰϣ񌏋MO˿U py~4׏B Ĩ"SfdYd4^i㢊)VmgU#~}DNh/{}_u<..)/aZ ?U C ڻ!66ky))ȸc^P3h >jr1>n&H4{@=/nTvq5~7u7Hf`u/"v|IRdoFdKa7D0uN墮c&pa;)lsesmSBBti bP&$` .*'WP^dϝ4tDL+xƃ%jev$w5 ޜ!|cijvР-%ookJI2m.g)*}vӔŒeSqx fFyG~j0_ܓkϰu\͟79 j$~YUj Qz^xMqZP4m:r}]CY6|j 7S㍓x+rRM`+ay<Ա(3`HQgF^̸)+~| "Z!A/V ԍFFsp1[ 6Or/^ľz#1!obC'x|A &C nX"C KaJF2nw&$KmG=s: iפ\(jR[(Jm|, DpjH_|uE;k`ߩMz>ٻѮlcDYZ+rjm 3 zTHCKK)FD§ymzM/RcÃ4xc$EBr] 2쉏VXHS yrL.DpCLD`XYHHy0l\HZ#9pϚܷ=j41-e1Wu``7ox 9-A:FPC"Ut&bO5[o,8jۊ67ZMpo%!+j2bŮ7k<#`J|%r>/VqÃ/[ctrLrq\ +Mox+\l`lx.wo_>/i} Qh~KK$chYS$\-~}y3/~^{11o̷cI/ RUoROs=@Oث|>F9aJ=T"U% ?\>BwPW".=VX2~@ V Jrr(*Q+ԾD{TWD1+ X v0*`K*QKD%{TWmR.}SS̸+>UjP.r2˸Ni5ʈ9J--=7LQ~ֵhzD:g,`&:YЪlU4/~{OfU[qO:fϊϰD+/#3+ 1bC>d_cME)({ 4aWkxRlo>p4 T-G7z|FwZasOfjɇ(N^&^E1NV*̩ y%EN!!B8ш34!at~x? 5R>^dD~xZd.2`J?kj_,ߍ;5.ܝ7 &E\5lrAgÿ9l%>k&ayóz+?q$p(%ád8 Pr ;f8 ?ád8 P2JCp(%ád8 PP2JCp(%á^W(qǮPw'J2rی/2@_ [Z;BTPV"X)۪z(e[/JT2˶Dz--%%ád8 P2JCp(%á~8a,mݿQ )%7?J2J8*sRiD 03.7|c;*Ž-;x#w Ze*Z7!pudJD+l J-G֪H9DŽRn&x92J "(㎤R.k%K0mȹF ]h}Ŕ!T%Q?5vvYVsGb l]7lb>IVu1s=7) AtFBw.J*Px*j>LѽVErl+Gw#%F,B0# "`"RSFDD b FрG!eLDΉiTnXhߒzqhX2ؽ =shVR& 1(A}0A*1j 3ꄷy9hnKlt[jcv%HΠ.0g}򐦞sDRxԏ-86Ke$"_jSX]Ã&|"J4Z\ rk-=% Va+&T\"ۏ wӤ;k]1՗6n\"RRf)Di3DlL处I YnTcB2F눗20Fq .5J g=A CG'i5)ag'&8!$}4"nNl m״kZ֓,>f8ƏZ?b9SJz|^ƃzc336(dDFn%1-Xhm 4wF"#*$;A Utȴ,s9ӸW)Zp~OQ_+gD"_ &Cm@#?IΏD;Ģz; pBp`^X09ʁős|@,L%&ͻMinv?{&zQq4uztޫ<$[%k<c-95ivD+ M)F8J0Qz8dxz(`'`fmTBi$AsJ(B[+ d51h7.f 7L\Mwg!kpGta(evqy4 0OqQ0bj3^I) <:NGiG 3UFՓ5Ϸy ?(rOe`*Ub #h.G#Mv4O2׍'?9#7kz +e ŘLJYg^Է&{`!:M2ç1Xè$ oBo{aOÓj$,+E5~WOlzZL1eSo(eo\5BQnfZ{/2&aCCt$졔udOAoݭR5鞻2w߆HsYYD`&k3}hS,;Lb4a ļ,8aX ?=iyOVGJ2rI$@ j'f}Î2|VSJ.ݭ)7DY%^,x)UW2Z浯r6*{;vB ` a>/y..у*^UKOf0e.o_1 \f9|!{WmA_2$~p $(jm1j;)Rww:ɾx].w/ggxA'6~af{AJ4%#6Z|pw*_4WXwSR>}]q} [Z$h_eZr+/'\!>fnD ⚐u2c͕0spdz±EgVzwTlQ2zswEKo¶{5p |t_O|}gAQ:(x^8\%%[3y0*,U_eu1R텖V#(/IƽS )Lr)CNZ#S)$od>%V.9GWGf..y_[W6Y?]bߩ Ll1 W#PjXҟ7)|zU?o29H6ݗ'פJ~R[[*SSѥOCL~;i]=+#ﻟ~xJW%äpC.i^i&IvRg}K< 'ɋs.l tiA\ fh_6I5Y^,w5 M2-PnXt> *?;z6]`Q˻6=sy5.:{J/y$a3ʤd؛Pa]f]y^h\Ϯ7Rot=2h @Ssq1@SkМ/j: &8n |`D ʑH9YXjc`%3S8Tyс)y-C'iZ'Z\ L唷#'e>ܾmYWo!Eyv`SBHV@Z A2FaءbJxCӣֱuuAM4@gk<حn]mOjSTn9 Faj=qhDów 3zzL{aswD{Μ29{@Y6;j:lLmedbH8T uQieX2YZQ-8, ZLVEL 3ܥą`Qz)U1 e -}l=e*jib@)-5gY2mJ^zNk+n翾/>|3.dRYt†44ƨȲɉ~s g]Ny0d )hhNZi1sea}dTbPDQW Ĺi{Y\dd҃HX2!,pcU$ c<sЌLw,0rBqدIk7=i |0dWY 22Y72OK&{*\$\d$ <*Ϸ!i\x՟0"90"pݮwѭ(npݱZ{"ǜqs7 r?ՆtV67>s@4N&IHJ'rtjhr@Ҏ &`騭m m @i(vQAFeΒ%n~RmtX3_8E]X`y"St}j^n ݁OcUw.;:{1;OK.*奦;נ9Q ȤM6^yd7&x+~ -ii{8IAӵxH=';z ׄKT!Y9G,O1ќΙ+Ȓ +YZIkJ݈6I4_\yn?[&糲 i:re,1;/'OS#oo~~>O7lAٿFqݏ[[voM %'ˉ6Ip)rf~s~\ 5 bW=RvUާs50Ȋi o ItqqA6i{]5A#`IgDeZ=Zpm %U+beRr)i2mJ>H"9!JwV:6^Ԏ&ΞN7P"bvnϸZnZo oߣ|GyK_YN-쎪UxgmlIwrrh]"]s [r7.%6J%y)t+G5`Xٶnk/j)\#ò浒|6Rn^~1BC=ҧtfWoٓX /Lt_ yqy1 r_Flsd`mؙ] :̢ZV0ٳjD825&&0&wj+agE *Ǹh1h1p.S22Zd~4kTX ]}pY4(,h`9ÜEQZE#r,)[ 8[KJh՞kن2J]巘<$4IJ"^Eo PyVgDMmfN4pZ894Hnzn; iU;+˰--聁! AQU Z =I9h"=DiDBI\!Pѥ93)j\ˤjkjq<V Cu aԅG^^dȶzq͍hB1#sIAeOtq1+2:3uZ% ,f^ sQ0 HmJmY6LCV2 +kj~6k&PvqZیZ{D[q I$f;<:jQ$!\%$7L$OhɆs*'q? r +:dHĂI`(18WYVg>l%|QǡVֈvԈF6ⵖ9B (mF$J Ϥ3NTT!J֝ˤMo3 mHn6"=I'͍LH8{~H՞b4$_gQr^Ts+6Q/nQx.sq-G 0s1KQ d F+GXz׋CчFǡ*Cy>< ,0mApcE?Zɵ.%\?[&zMpO*) &mMgZz81HzGxP눢E2?D(!E[̩NWz*%= Xݭ'cيwy*m*1YREg}ځV "پt-qv^/x: k^~Xb oo?IbIawk?tjW؎^ofcČxڬj&ҘvYF0"lOwyKP>yKW~t]ej3i6~B vF{5OWGlw?0iQW%ä7cdIeiaTY g_Ef'>? ~Lu?Ǣq57/14Ɯ #2|;vIΧCHrBـc΋eu%Ŏ! 
cC"x4H'Jд#VRhk"$gAs (T3a.GR3i#{΃^#T.HRdY)_TuP`{ 1dὡhdyg7$I[?Yȼ؃l ZP{xvajGO牠5g.y6~".Қkяqn3RW$0lUsQW$lH)Gu9MT$\.IPnz_ld 'L*y/7/&eW4Rk|N7pNe1?_jfɒ$ÿb7jPMƗx5/itкm3B*g\/CK3sﺡn?e,9?r MЫ-oӧJޢ/̋_\Nޒ7: R?Fx"$5 @©d8NF=vC:yYNH(KHWbPE:I3E`|lt|.H+Ao` 5{vyɘa^sX]O\ġӨIf{+3Cs&|Fl*+m#ɲbi(Z0]==6^("J5Z~MR$%19ݦJ=Hy3.DƹKIWHu銭TWwzA &z1 +X+Y:zJ zA )^LbsՋIWlJ+{z ?oM=F\nn RRz7o~#=fw;Wy:M:{?O*q׹Ӛj"Az!N Ώqqd\\n&ӧåm_[IC1/9zęVMe>~bמּu1e<덭ۏ۶{뀻^;:]nռ37:a쩪cʄ!WRQA\ l`nVg H !dO+R>0׆>coWA%#YjT7n䬕JuVrSqZ*v2;ۜe[׭z?ӾWxh]{-k񼻨8hV9?+ksMtEL-6$RVB^C]QXm$н0S;(jQB"!1k-bm*K1rѪi|HO t1?1wҏ X)\CiR & B̹ 2"8 Q&$%㚣Qk 7V޸)ٲ{z6Ou8=:֬%)Y! ST&KJV2Vibki{, Y3)$FZ+ix&C)Fᨐ>ÄìrL0'WKn rݵE nkW2OV<𮡡6d-D&^#xuHu@Ky@(}r,*& ]45Z.*``ffS@yV-#QhXK _fRcA­|PRF>@H$5T^eO Jƍ5OV4zOy Q$ _R˨ՙ P #Ut`u4)@JTU Uĝ(BVɒ7I|ǍU@!pV(߉;:|ҷ9J>^b=^ExBS 5^Ls@ Q%($0&"Gt /c,C)9i 6h' u9.z-|x~ΥTAvRh# ŌV N+%cyWN Ie#KrpqkƍEQpILF,x4" <@ {pwFdU%1CIaI6c;*gɈ&PB@:C&[5JdicIGYDA`F+f#o, R)4i[y/Z=%f@7)ݭE a M{-J˥ FҸy=zyc9Z輜꯫g6r8똙D@E#[7@7۬Id3 5`&°h ]YUۊaYT-G(QZ$`4vfcN` 0c! MzҠD˱ZΨT@=ʀBj0FP-B>^ ==ZiaeP>cW ('M2P4UU7J w^(.ziw(bZ1&Ucl9zU4]GF$<QNlUƚGj|҂cMZ B$TT(ɐ+6Us'u$Gi:?o)/>ƛ-"Aj+v'QHL)#AU /- &o_Hl2)zJ cÍ+++++++++++++++++++++++J dj*UNwlŋ#xt(_|:m7?Ea>_QU+34C l %IGN ʣK=`/_ bdOeORQ=絡mZfXx)ipŧ ß}+IPlDREc5E//Y ŖH4b-kކjO<2k'fnTZ2>͛swGy,7WF?[ /7]/翶Nsh tO~ /ò)cc%SǷo;S'1It&(w]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qw]qzwj+OEUiPm.xY~E#6ή0Q{O%2nj^6,"` e* f v|}e;Mrl/3s{-o ~ v}k:YxHuqvjl#o*3?]_snO״e3nmñn%Qm|@P/(x( סe 7ֹSL!$lAFlh6z$rk~es/x^U2dв1pw7%arWg/}r{Wgg_{}~Z\sz)1^ostyz~w\ύ:EJ]jcH,H|Q/;v91롽{{m@b.5׳;0ZZuXG0_Ƈ[QVPrܜj9ρ{v2V.'>~*8k8G &s,+<Ċ|Q* o^TL4M> ,iܑ/ i~_Dz58{ceJFPY=l)݌2ō/EwEe^Cϴe|ad9ugǯh0 ٠L،`QyWݑ6MALH'3az|3Ky,VdKyₑ_敇ge~0}M?<9?/@x&\YX)(QΩB"57oȷ|KM~4 d2s ϸRC <"\.Ԟ ڻH$rƁ+ $譍r#"*i^8L.r@-{ٻ6SH~`~5S$M>DJ|nCصlTWUSK3 XQe0$u>`,*e>x)h+Τ@ݒ}UibTR.%K?(xR vly_m8cJ=֡=q6-Sٴ~NGmA[ 'T.C $TbJ T*1P%@UbJ T*1P%@UbJ T*1P%@UbJ T*1P%@UbJ T*1P%@UbJ T*1P%@?1ziz>SM6֒ o/}tw X Ax@``ŕZxPZMcRVy~F#*0ygt4H^~W1 `>~T%2xW܈@l6!MgtB 1CG09#HҚGx}gJ;'yх:S>Qx7e 9NYgN&9*NDBY@G!{ELsR3g%TIn<ǝ*{O-}PynӇUrsC/"=T|E`͞Ұ7qX f=AMmXw_W-zgp<\kU(?Ը-ygnͅOno/G}&m|o7TMO//J [{`xٸ]"- _W*V>WN(֨_aWp(??dzsǂML7" xdx̿5ʓn mM#n^?bUf|W {hxqzq ?h<".}k#d43~垏y#.{ οz`h2j.o9XҶ˜fs~ 4MafӵH~H.塲tr~tl<,r Ӆ_X=_7okP웻8wݠ= ii˞3+":Ɓ`Rw"daRte;.Ieud֎Pے Vi-g$yJx!fN|uK;Lvc7 NK*9j AyEپZ%} ZbWV)f iƩP³9ψo4xdzefşhyП~p$>g nxM(&Kp:%<̕ҒlcY0]=3 &W: ٨3vaU"àMa]Lۏq.ZmSjj+݋S(O,mXp@э"2$˲q:N5a&铱9!.h/!H!“[ %f68DnMcv=,&a{:lҏ cgpl)8"VX->W(*r,w0giyՃ,GB>} J8-CG.NK]uӒde$.VO㑛ij^F 9M:FEX Lopu {XL;N=3WJsܗW/ȳP,o_H[o)g@$Ȍ%rc1D6csZlm]ek'3{X#vYT?)3 --P>Z#U, #n r b YJf0v|mTwyDI[4yg7Tv'/I?L[oX Jv]i#??:g>.i҈P>DQK72;2w8RlD{A"ސH$, tæK:PKMa;fœYAΠ*H&hV28!k+%Htbj-f 9>zƚz?\]}9/ ~Vs!SVm6}/nίV`1LFEj3!ws֊RZѓ;c#+'ʲzf4Op)qA"&I (qYkg%ct5yXa7ϫf^i=]{RNֻOyo.|Am `gg؄aQ;%=uDoWߚn -nЛqwwC;vs0[aJ!D|x$Þ=qC {v# {Iyqiz/7s7i`ŵ埀Szؖ$2H6im_`!*v" gRPa02rk5zڣ>Yߺ`~Ĩ/:6[* iTr/cM~~xCxƠc>}<`h55qNVK ߛb3!˚oJp=Ձc-"w\oe.{&K~r4`Mae-Pe2;8cp̒4दN3+x{w/h nv|q 6M":$*&{mHA9Kdr 6 6O3%tĒg%Jnwp;r|MάW79[\Op|UYqE͙A`4gFq(]oΌRjR3͙%d}w?Ҹ ߧYGD.JGιoTu17X2@bίX̛we.x_Кly57fޡv?@沩`0\䵃va[ 9L9x([?m&_?`+e)8eox`Y2Y?zbe_5d{3+W&ی#Ic529fDdPjrpƼGGׄzaO+_Qϻy! 
YMWpo\ȿVMNo)I|b Q/h" cCR9&BbLu@xqtI+vRܓd) Ix61htRXr {t)I%mFe|N&QfQTЩn6,t"=-bviDk8aOӂ˼djb^b*E.1ͫ186ϟf & ޹jb#k/ 3M)Εp~/7%:9as+{xdǗf4 ^\.Ӄ &@95>&Mr,LeN OD1eb9cWR[vZvԅ.'ެźGİ;z a(Z2dXnk">E)xPL,-ɕs5nG#h s!F[R*N4xPZ}Yb$6VK dL8Y#UZ\]&W,>,zQZ!Յ FQa`,Rq^z|ulDjmS`#Ć)p = STMJI ]MJ K'2ƍsIN I$je6VENZ9'4mXѤE\/,N85$};Bf[Vo[1=6a?YIbV"F˄LQI}Ah*81gd`';<ӲKRsV%KQp&Yq6D>LNj͘ ;T9Wxފ~}<fyS/8ݧ7WT2#rE\N jP\ լ\N(zp9) -= YGʽ}z{9""xT##U7Q@"m{΅wy\HWyUkp0 9bxb4+%MJDm{H`D$4f)ihp%;G=#ck mq='t}Ы< %V"dZN "fhkev*pF[,uut g1 ) ztm" D,N± zeWԻ\lP Zj6,|g\0i H><>[k;`i8;$އj O:PI%!O ,(;q,Uj?6/UuvgkR[ݵe'.>mOcytWz#[e0Lg"Q,syeBxD;1Ua3ZWI=[Փ#JV`;Q45XZ][y #͵nrW1ijʦ.e )]BØuv'=ui/5l䢖FB+ 4( 9^ٕu7!2Yo[]C =ewQ<knq-P3zf̶|qu9kgOW{l6/ş&<PXJ0&,~]ezkS~?Ě_*#k6m3w܂"{Mm|aq-߬ǯ裫#z?KղLú\GܮG,o%nRĽ4; Oǣ|SM.]n:zJkZik=+nRfk)i_WM[EͧnH˷SGwu}E.(mv{w[h7Yn@Ȫ9^O|zthh%D6{}tQP6<|m){,/O]RW?lK3Vfd3Go" G7۶7Tln$z?x`x"/?d6Dj[?K<ޗۯD]5*lvHc1*y̷-/5__T6j%sQ81s Οa> Z5=;[?wl:dy/G=jgF*Yq1]&zޤ?3ܦ]푞eb.)=ɿ>pu}uvaU: {8DOFٷPr]F e2`P\JY9\(}|a,9lW4i˹\tr//C@yԯ#($Ee(Za3wQ7Ptg.Ⱥ(QVrU$FztY6חO֦o[]lǮs Ypqa yjwJ47:DL 5h}ѫnrO:[k2̛bee](̖ΊD ii3-%--{[\b&rUԴvD isVs1u%+kSm]?EU s X& Tzo8UK 뵫BslpvBH(`%h$d(KkEnsJw]F]9F* f{8޺Pɽ۩[R۵i$$LVaBU6I4 '!\@g̎cmRaRaz6}se Q (S397FףU[>p߈XX$w'i h,MNbiEni&Yz'܍Vˬ3W0P)J\ݤ\2%lGӒۼ?\\^VЪQ_=ċ?|JkX&,2ȝ\^5udMU Ղ=DqGnjY55XdP2pY#(JbrBh%" pMPHB-ЃBiޞ๔\+/`^WGk{ҕc4Z]٤} c;"]JWkl,"ZgBgIWԕBkذxtE<](;vpt%:htEVǢ+ BוlG'] PW!pB`X4"#8VuE`+JHWd4B\tE+"j`)htEFŢ+uDJӛ!2%HW <]FWD "t]J u%t4dJeger Vv(78ϝz&{=E.'?bWfYgJiPB4zs\ZOJO 6Uʄ*+Y=:MFK,H݅JvQ,@*sFAVZ+rU;EٸKpMK27 DR$ A۱ ܨK-ʏd?*++tos`yDB`pEWDu"Jc+!q+O[pE4"++)+RF+VuE%] PW 6"]!p7u3t].XtEj)++XK7+ ?"Jڇ+J܎ ?Qn<}WD )Hm!ruD({ptE:.E4IW/GW|Ǫ'*'lՌ; 'g qOJlOڷ깵6?`ãZqxu%0,"]0htEJĢ+.t]!cIWCԕc˾:]-@"VL:&T]!{;O\bJdu銀5FW-n4}WDk"Jg+41u#*]nϳ&zuE%] PWacEG+4 XtE+4L$] QW؆:6J ciN$w0ֱQԴe ^i,Jk]+ex|.YŘN IJ@e글W<F &hҰ"bǪ'+gj4L^HB~PW>LyQ ؘ)+toslk`htXtuXu%-x$+>F"ǡtJC,BZWVa)06 pE4A7LuIW{މQh ]WHMDDB`D4Bgt8]!.t]!ewҗJKcXL}Wf'B\ŢVN+JHWy3:hEQ$] PWkTu,XrNIAYLflX4&\+5,7D+AjZ1d,x˘;iKН3g"JI*9v+lLmt=)?\M.:zu ^L'wzy ;{֏@ -{WzJ&][\) "]!pѕn߃ h-+t6jʂ  >IWD (NLtE.u i9(%O@`;?`ej+ mŃrHW G+U,h ]WHujVZtEFD+;Տu!a`Y .=gѪ mz$][qgDDNW<:]-+T.j늀FW Ǣ+k q?N[pt%*&]!O@]FWDD"Jt5D]s6:FWĢ+k-t?JIWV0تhtE.+t:+/Op2?Pz\"q眗 :jsȢr4sf%%ՕS7U-a:ey~~_Ӽs Wz/R[}4_^jW&Y󛣶_NYxKY=>pyj:?m+2I>8gul#u]Yo#G+|]RF)`50`` yHE[=US.TwaHb%""82qJ m|)}d w/ pr(Bj>r}_9Wl;-@.h坳FQ- `N(lB >Z&']D#PA$ޡVP jbEC_ =cpE6Nֻ| zR_W[գ?f1%uxQjK[A*~+D}mG11 mÃB_7wp4pSlUjgǛ_\IXJ>wa7pm(U2^Lx2tFejN}ۀȔSǂiϩcb]6 R Dz-jA"=&bBյUCbXdW^< 6;>9%ö8҂JfGQ0TJ )™M.ouh7k7A70[gRٕ,M3aept f?Er9rf(S e* }g crA̶鼥E_oΰ`HF&\2I}$rP::# :ޙ8x⎊8 fwIJSIJ2D >\8"gBSRWp9m 1\db-V='+KF`zkY>3r%sZ+B hJQƅRxnFPP\JR8"(Eqw8b<9jm۞fX$&QƕT)JSUk'mJD(Q2%@q?"~ĽT+BFy9t2 .KX͹ǷhNc 5O-*E"qM%W)`?QBM 0n1=8gAPz@iٝ2{2;^('f}iyv4jWtq\㧿V\}_j)zvzS吙ޏW'3 nĒa~dˇ8>U~$cP9>x =^#o9.r#9g(S׏p=Vy?ʮ$vЛq﷛*F 5$ ڰyyjb0oי$"<#^ WcrAI|]_ٲntUNqS4f? ZAݑq(Dg#1 q37'\Ⱦ߰'pZ1edQ]&u豭jD2keonvrtPf^?`TM-=cW M#YV %HUkP˴}xp#" 30C9jUb<\qi[~uo#R`fnzzwup{M`Zm"UueG>C2 VE 7Bo̓J"2,RCL0[* onMQIϗK_!!t6KwF)cEɨiNȼ.i I6:9%ѧ܊ӇW֬@ĊD6gǃ<ɡs3QyTǺ/Fښ9jХ?'IPCl|Fl9Frs#"cdalHhcb 4%_Q&S8:i:&Y+˫t!K=+ڄ2 /owq"= B]eި.{S7O#dG ^VbY w7m~7~> Lvdofeo?9L`_9-òѮ?l;u&%dHk\ox|ӵb.4Ѳ1j *-ѝL/e RP"XXB % H2IE4$JU(!pS̆ GR F&Ɓ1xF`}HZ#gC udtaEH-;|܌Bqa,ʹNm>Yҏ_DG?wzu2)1R oj#:y 0l.X™"CNx[}#B}כ~yȞHb ^ ]1o6ξ9} W9qKg|NlEnA:q66ޟ~}K> ޘxVA3A*&]ﭿCiEϛ4Kz"Hemx. 
*9xu]uKMVx Pʴ:ORs#)$4m%w҅g&o7I!cW, bĭ',9^ӏҟƼSkԈ|7P.bs-~FI\&XvSFF `#VG#$%NтR /EJؖ%5rKF d4m'tެ|uwUlST6,۴,Ł" 1gz2>s˗y^Z#SETeU/z)ǿzHFbqfjK`-Tޒ lhwn؅Ź+"3YO{ E'h)$)A @xFDQp"QhCnjS$?G0An8v?+rsơoD/6F" hxR{gmK#SL9)tӡVJ#1Z;젣LԷ&ٛ _+a][dPp`hQ/Em[v}@DPYAϧ E9W㠈 :"<`,R3fA+p{9p.CDJ8тL{wJsAC>#R0 8//,C|{B-Z;1 *(Ks1q-3*&N\$"uJ9ЧEuk7숷\Kx+Z /^W]ri]aP68˻B; 9r`̜DHV,;⨳2F@<[0G_hbG%HuOsZZ>--.Q+vSGamԊ`y(ԁӖ\ǍцhQ|xZdm\ҽhkD$>+6qB2l] <֜=lf͡C4O@ QF;T,6?m.6Ķ hQW$/ #%-2I\[X4褎ug&LL֗a':")K@1.p!`R<:jVck'˥Yיm[T%q|Q}#&,45$h(*b<0¨H8q# -H{ć  v7e|tfQl]~Şd4<ݞNYfw?$V$*l $Xvck Y& 6$Q/D! $"pU0 -+jkpW~ҤY&wN]~nMNv~~n=l^^Ҵʏ``eY# 458a/ 1vƎmK>veD2#ځF|Hⵖ9BGhd9z#L:d 8$kī,iק7N $YN_ BIs#@)<A-3bkpg/NK]u&%;jy^x!s̎{-)@%dM2&F/!G-LA#y-gXŮakұ+B|/@a-wbJs|( n~S# LJbh!+)V&5B4מi㨆|w_ع|ǝZ쥖(,Q$WCRxHBp95pꝷփP0/Tڝz 9x'#W̽*:#8Pd2Jmۀn 66k;\lkm~xN,+&5@m-_>Z{:,izLc>MjWcZ#h^]=&7ݨ f kcWXЈg@ff%;nm;s:ĎX6{mM686;ٛl_mD0v uQieX2H x(ڛLRB(PÁhxdII(khXemkp\_bw߫qm,4POWivK˭ի04.dRY$?$ס 1FEMN9Shs<2:LA{c< 餕Ș3Yc"yGL^*Hze钲 }B@ÍUQhEG\h!b^bqHlSzPPr|KXMi4^|0WY 2Bf< i-fKV*$"3&yHQdZ ^7;\O)d)u){Lv~$nu0HO`sa{̀-Ȏ Z\} ј:!$g *ÎTf_e.vmpD0De&tau& JOl}(z;|uȫfyuhI*nXMNwKojŝ|,;jygfWtN'wV#=UhwћB+FWIt3,=i}b9Pmzb˺ri FregIl7Y'l핷u$3MB4Q'ctc( Ufըх8T(X?nV^rτ@ݳZ6".i0b e#hFVp'mHs $2 9M~D[9 \VMw^<6OpFՌzLMqU~>z}hCD}6I=.M'>>M$r7Uk,ӆ<&}x?o}<.f~}~;;,ޕHsQĶt?A3%f-iE_<ӂV{%3=@T d {DWv J:@Y6]`motUP*JI&#* ](BW^ztUP 6ҕ&e2}ʪ c]BWuUP:D2 Uj/puoB-UAi@WHW(L0\\7[ޟT_̈́6M3P1u W}ϯ_Ul^4RkSfINWO=4fϡƧ{Zo$:jO+=ծ]ϕTzDWUku_j!NW4] ] Q=+ \}Vc骠D;D[*hAu J:@2]`P7tU'vU;RqE,\CR֝*4)/MTt~fۏ)\Ɠf5a/|9% ߏ~J9 KK^oS΋ZlP6m>Q^Se끲N!5e;ΪiW}yj7YIE^BޟCR|F7*D+qjxCߖ):m.G$*Cmhc%7?ZM,ѻ"׶^¦S5n 9SdբC\͟U<]I1z;'5M;G}|MnkekKv0um;zL&T0](pUoU ڗ^|a]~kP>؟_kXo ZV 8Ul!G.NWOlvipqt4vOkѕy]vz^N=بU/tQ}4B tut%AUE_*y*(a%Z `mzCW3HhmR tutV0=U/tEh|J!:@Rt'R ]Zu" tu8tEdDBޏV -i|骠zC+.uEUbW<]qi5흮GIZԺe^Uϛ׋E[EquftQ~a!巓 {{.H6^ ԼпRE?ˏ9- \էIgSNJ4Uǝ9n*.Vp]ߚ*7qt6 MAMVEJ5r.ͮ7|]leJ"w>|_|ҫWܧ}=PUV)qWTWeO5ϵy0UWOkPvb١6"*D$7(nۣͲA)8DI =C?{n/s|3@wĻ#Ut#Gd=-.?rey: !pTqcVrῦN(}mr0cpR 9q2:ţ\{[vN{y*kt\$r)_㛞_lI=$j!}wM')Y>mO=_9g>.";h jopyn-)'΋ !-faq%6YRfT8UU1ZrV~R$}ԯ'ۖC֍P6.M=6 K|w e&PdK*0!Kh*os#ҨiPvpW7*҉3 |eaj>HyYEjSő4SAd*هXK5t) Qu{UU5^ 5xNuulaST#HdtkNqN>I)5j'ܭ˱'_sXhhM?cʷ2&XKo(9P*X -DmT/*$UW8k}HPA ImgjSrc*tE9yXjjc*PE*t:ZaB BaҊ ӝ6*ewhBZ% l.$ޢ ט TTPtC[:6 4G%ei2P.қCPB/ Őp-)Բ , %l CwV0׍ YZ˶ pdQ󢷂UДـn(ETkw˽AALlG6n)Hsz7lGTv`ܽvU s&E0 8T؄׶A\cӜ`AV5+-w@i̠b oDx,8h&[j! W۞dW^c@CoTzȸCæGP 0,JLh_Daը |uZ!$VX00a:?%J`jMPPgEeDG(y;@gi!\ :k¦b8W -te,\ Jܬ# 2}iW (ԩ+0d߸PЊTJ]ZT5{VQRA}k 7 :+=ˁFkuD(X(k! ,o 5P cN m$;z|p*; &o21O_)_nݠłTM%>c6\Bs|3(IUvXNZ_%` &^G9ħ5ﶊ/o*6UOގ]`AQCیZXI| x=Ky@ *}vl|V2G\!HC1[Zm*d`f`yb@ '/7+XX肺V24MTLyM*}ŸQ0v0G0/1 e!YF[[yx+$@g6җѬJrCk4=Vxe&[W"QJ}&Vȟ>Yʞb:\yS+tmm,z ԥ\~ Ŭ>&JQM}44R28Œb5|0 j `QY/5QpBؚbP>() 6E[L~V3njKEiyB4 $'e#[ (q-mncP:LNDi 5eH?< B#;M#rכ_ a2,Ƣ tרPh!0c;QSz5*U7h3,Ɋ5gV3 o=< Ե|魩+ . 
6]f)40|p_EUY]5S|aȷ<69` `Cx; _isl}873b5 P!n~xcs`3 =7`.ؗNU-mÚk9&mF0YeLh ƴy_m @zVF6pݰ(a!/{ SiCo40zl\?-Д nF[=hͬ`l'aPJ↙ttk ޢp \8 0kK2tiL  kEhBMzSjśH 8en V3V Kk.?t+{Z F()ĠT1tՋ7Bbz+^:eOQfrXS4 2r7w?OMP0f*)9: ^||גzj] wwt˛^޸Z:`B7z~֗W7WW/O_|Dѧ %ǫO:|qÇ~*>|ius "!է|~cj?FqD*NM[yFtw19kF'#(@^@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I#N9KJ-K%7$X~+$1  }y$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I I}Ii$D$ZLh@!$g$ tI J$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I Io(B%%8.( )-& -'zIcj.*IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$' zr_My2_t[M77?.ݯC 65ЂK 8*5ЃK* .CpSotx[|~9e`}kR" jtn$Gp=a`ʮ꧿Շ[h>=P ~=ļH =OC0ӏ%q:;ڔ2_uޯ_`ˏ 亵n11Xx'}^}xAA<`|A_`o_ 87*d q`>ޓ=̿󆜴U.OwEe{|{= F, Y?9CJCM er9 FpL>Y3/ȇ%绘H0Z݇1J'7~|X|Pn3]*_ qPpXtw(tԩZ;DWL,7:Iʀ_]1bBW6C+t ]!]YMxbڸb:]1 tut6 786]1\3h=:]1[&K,åŜjg:]%):B +엣.Q&c+Ԓ620`CW 7,my)MWޟw\_KYA]l&6lZqrt'cjl{tmVI\n[wڰ' %]* ]=u굳օ{|'nw 9xb ]!]Z]1`J+R芷tt(:Bޒ +e7ŘAFkBWHWΓzAtbŨ+FԡPZ|PѕZjBWСtJ*ܒ]1` + uJ<]1J#W(`DW sF"[}t(chLz/Q*;hQlFp.8A?(=r`(NE>AfYNv',v ZҕaFLr!p"\\}'֘C+FiTwCWz^(rI1;]vvOO< WOWz}X]1CWK{LC+FiLTor#BW6C+ ]!]h3 +˷+CW@:]e8]BW߆\ ڧmCW ,m8xb$Hn㾟#uQQWHW!okVKWrN{ȍe0@J!`3df0&6HZIv{.ْeتniKUC[$=w ZvvtutP"7BVBWVֿDK+v6'mݚ)4C& j>xTj Kl, k`X&Kug64"lE4G0j4/=i#?zI3fpyWr6CKϴ SC]G>sڀhGWzJ891Ĵewfh o;]!J:@)e2+ˆ.φk'~9]!ʶMJ)ψF5k΅h}Q*ҕR×:upɅdt(E. 4$->E25ϙi<4AeVds>Kh,u5UeDMIwj2:Q/j[~>K2"kp}o6I>҈(jNrJ;A2I.thMN%̱]}5t%zѧhuHc#{Az3̑fhřNl JttܮgS]`H6tpu6tvBttut9+ld>tpϽO3F% @>fp-ɅbNWڎ.$eJ  5 QDRT*B2+,y6tpϽ;Y3Z1]] ]ijI]W'+ujAD%ҕaB|Aˈʅ-'t(YLRD/2xC"=|Tj [?5I#<Զ=4)wiR̴~Xmg&xCD5%t_Oe7Յ>Xox:Ƌa֒e<;PKwaͨ~\C{ sApWx%02jN^7sU]Iqq9t`+V\] ieex?MDvcӋS tїX*ЗwOx2#XWy3^?}UzӶSkj(1=62n9^4+z acwn6** żtN U|.mO}?~+ӛ2尷2Q}j:Og7E_V²f],eSİX t\@]>th(QModAKQ\wM$G+U 7ABs\naV5,{NC{\Y9ĀCF_ 5^MfK4E.q^D^@vt"Xex!w$OL)D/D$xkI$L} u.r.EtV(ꥦ0¥Y1RPد$'Dƹt[KH6 HHP^L+Mxb'D'$BmDy`x˜- X h7X'PAKgRA'NIXɥ8APZޏTHoy頝0LB&*B$<,zJSsf%%=QG(^,Tbxx"oM1y1UdZKІ*ep" N P #":Q#@ pMceP`$:Ηt5a؍0J('?#ZAwwE~bS@w`= KBwXDZo_-?CC/kSy"NQP~*p=sTQ]Q7Mv8|ԊW ˯Z8Se-S5`hkB+fyPàPuߵɂ1z25Epgpe0u\[ oh/5 E{g0yOws[GKEudDz )X]<*v@#^UIvSi蛢^Q-Pd29cp@Ģ4दN3Zvͽ)~:deϦOSŽ>FGY<3J$5DPu2JU4TҚ5B"d |eIr[!!uqy3{V9췩t2qOٽl59vr^xb{Iy52gW`cwoxd|2B(G:UpҘSu ΒO`VZ*|'u/Ȩj$ǥʢi0אw?r7+NYoz^h׺4h >M߹?]0<*NzK'O0#`zwc@ Y?Usq249h,ov{<|%Z-w|Cu\,u\x:y&) =_.7؜#7x\x&YGīݡTb](fo_7땴or4&R  c2Km`*Q ᑚ@sƉiV5ud,Ҷd,k[z|ݵXw)츴{.:Esx,Lʈ[&c)68SŽ3Ir z}we=t[Vbv'HOv.̙\͉$gXr;696_J{K]rtkA OyzO^`qf?L:2=YT؅+0T3L}շ)cLaRPLIp̘NLuI ]RB[H๥qu>R(("2/EZ9'4mI^-zf8昒3`6ݮAYꗌ!3٧isys}Gܧ <KKMg3} C< v0lWqw,KPB6BMbrz{erSTK]^O|W^1N}E?imjֹM}춸wm rwb$6Y%/lOvt_/qgW4Xqil>/JtZ{7G| uJ\ؕ. н?"N;q[^"<ၿNFኲUIno*|{ Ǟ33Ό]tЇ,g{R^ٶYKyno/,7is6+I_9B6~W?dsy6~ }Y^/E4WNTYY"tTswU#QG=wZM|t3#*u ,R^4x1ݔg齭t3jSޮDzr.mJ_=Pm;u*;ľQÓ1zjNV~Xx9ݸ]F)SL%5қ̥cFUFPт22Jfsi=.UˁE/B6Yox5?dqhSV!xltOʄ+>ļ^!kiu5LumW\H#}wl;)BEpX5|Y;D~Y>s2Zdt+_dtVl~Q*4k[ F7dj0A!A;9>Sm7zpOsCpȺEv:zSIƎ+[jy9p 2Zl$N2ǹAʥƳ˺^>}nԗڥr)9!B'X 6(J2J&c5MNތV꺼ZtVݐNL8ܫ7K˓y~2r'⮒1}Sx[ڄm "{a,"dډP>R"4y'_UKKgع ӌτ˫E,q9g_p|>OZq$Z<)協˴"h%3Ld)m"Rѳv%Ζ¿GK^^Su. B~H|t! m׎_Aw3wS6zR7|uw˫w6/jsw2oogVWr q^mPm74yrBqr=Z.fΘ~֫׮۰\zn4b[ab㒂F)A.X^ɬvNv?$O5Cq?(K/;ӧͩI1,1+U*Quţ6 JBԊc(qU43b,JeiDpA(N&&%KuBٔfq,W=+9Ȏ F`بhm㼐MfO䠃;ät=JD4-rL4D2.G΅ItICUVgΒ#V.zʼnYibє\7֡rEk%a m12P&eu_d{5@@0͈S{cfR[0eP,P, L2(F6Sϙ}5e@:!`EK;:rIȃ 4,)O֫>Lc%x5c$Υ*2tHAHŒUY1G ܇k ,> U{A;܁Zܼze䢢ԬШTJ]` 椈R&qkPkd7*6x.g؞njңKNS*t-2@O]k'v!Ye rU)x袊t:gUd4Z:^iogm{&N`LYYi:YQh1;/'۟An>{w{woӞ-"C[{g㒸H/)'ck8Z)=ϗ%|GruN;6zxwVֶ;~M'2UV7>^]W_ ~W5l85Z71 ۨx6&dؘL{J{4k%w rr&a&5!lI J.AiYTu˦S2: `]JGt\Ef$͜,J3z'h,ڱ9ҁ!4=5t. t}_NNܼ |mʶϒd[ftݭw=>b;srhYEf!t8SX$1>d:/uyK;2n ~oרy{8/~7Đ^@ѧNV3ogOb;57_ؽd&%/W )С0]6r'9}\SF.k̲q(@ٳ knDZ .) 
tއ\EWFF!ٚ&{t8>DOmtwU?pG￟yx/>؊ Y߾33 Ccy iọ䘱}姞IEۭ=-e~;}Q7nD(?~M@9[L ֙ tNx??wpg,;S7J#{TkYl]N/e C_ޞѦfԴ;yOoypyDza2¥Z6lߥ5yag w 7Et)d%]9wp4wh5 3~(3ym͕hл2˿4%iMIFauV,1lsKg1PiØη,Nv|%?|e[iuX-T+KZ5K/yՈ j4.AKc)\kPTθ|c_4j7ؗ_,drɚNl[ bڨZWiE%BݚӵsTyŠƴnjQ *ʊ9nL-_ %kpP061+xGSSޚ"`M_3rkk6ڷGw@f0_Lz$(TsNJ]+l=VW[_B G4L8z=6]d&t]V~WLJ Qc@Y3ߒ(dN&@sIۯI[QRțjGɹ\J)wdk_,_HK]r=R\?4'rJ 9a@ҤZm ="ѡ)_Nyٞ.d:jXPG њҶ l ] dKW;#ta̛ɄIJ8I/aIPЌ#QHrAV6SpHLUtsIUG)1ԇDVArBd=ܶR R:d=:#p:M2dqX(%] 9 భ9S|yvh _N9:#c@O*M!*L]Tީ´a@Vc}s ;Y4QH fO_vOmopyǢa*Yt">*gdd-i\b(}kV )hLu4oV ]k|-! 9kJ^$PlػuyWǭޛHSj8( }>l+}|#>?>wovo~r[jhx綕S7آ#.;2Lo~+!},DLoo! &߮]9b0N-AucB-4m %ؒ'YΚyr^s\ꕕ1 ^bumxZ8 3NNܢ%a.v=jgN'ԟw'aD29jIҨ NӖղI]8t1t>:Ms&SLBxG t'iltANf.Pv՝VŢlJYZy8=3k@1>Meh^ř|B {Lg1yj9ʅiԅb"ڕP-Gj$#w'} ۜc|/rN_,  FuB9`l0`U, rW<%cguȎ0ߥYu;$8vVk+{0AœzWL0e4SàҎ\EVsjV^Gb"=e!s_鰕եFf`asW_K^ zpguS!PzkI1H5̧ucm1CJ~k6v:@iCK2@h,rU|@50)Id!i?N(5um{[?4E3W~9 ~tsZL5tU:!ykʧiYdʭty)uiԈ%e~@ 22_?8O;:!0 n[ԢnUhE,4J[[Ywٷļ2eeξ1 3ERr }Erf , rTEd hN-'@\1ҍc<29q~AMBqxo0y/XD ~T E*gEQ.X&4|U4k"g FB7B ̬z/)y.ӊ_MdL&l:6Iow,̩Wrm@V?Vèr|b0nVsE~+aH/Tn1E\/; l v<:f7-旻E=u#Afi,k8``Az[Wj9^!o\58b^uUĐYCG$ hmow0$J#~p[qrO)IOO%ne@[;(A562џ>ՏvOw}' ßlݙw_wchv(Yg=C%TrA8$Ȱ,[ET O5FebzmSxg>WZ$=X(97){WqOBˌ'_n&، ߖ{X(;Ż"`OSTϾ66{#PhHzkcz?ӝRDkxk@&+_[h6FXS6Ndj[Q5k.wjuj©a&_Y4)KWNqIͷ9 z,ŵQG{ܴ(gsjN9uZ+-Kpa31[Yĵ[2󒵷Ihu] ^" v7J]u֑ *7%5C辜lB@ڼ8p2_FA$Ki~g׻(իDկTD&=Ԃ&QkTеâB$WUFʘ|kugc^rN 8\~xO; D -*7#w/-45ꋈ1.s#)H"[.ˇRpRK%,EhQh7ǒ c3n1aUtjhQͮ&vU]ٹ5D,cY)&hSU5kkmW_..QiOqzEb7$*ֱ]Kv Prb99"7pHB5lnoZ.qߛY;Rbk7Jm;W[1_bAR#s]-8ےg?# \k"^GQSC\DqԪB E*JнA6RЊ\9285H)Hj\K피4t|u:UϫMnRR>1r0ZR1pOkyAEO]*4BPB5O`ɏ{`m)\%K9 F~~[Ȇ)80-_* %bwHjxRFn[`Mj 9ڊ'-t6}jZBחKӘtw&5vyVl! 1ѼTq2nL|ԽtI" 1QJёdRbY0O)8L-4tݑhӪ4?dN%_A>aa%: tKDyT, a6R/Zc8X3yFF#RXżu\y[*j]3kۭBY.QO׾g#h {H8K2C"/M\LH0S륁6 ui}5iiٱSQ+< %87oF;nvΥ&Y6e!lRB0\HlP"4>Ga`GRP(DZDŽ>96n(qZ,5A [VnFI!1A[ [6 $_iVg+B_ӊÕ+%HiӢ97~iO$c"w7%&'#({$ $xN<' ( Nטy8bw:uM |ǎIÿMcP-A"x:>WnRܕsJ.-w84xUhgzf-|Gv^xZjI~%;^?)/Ơ='G+}r BМmU$%5Q7Lt5XUsR#S8dE^$1!T lFy%v賕yV1lz9QZ~6\)^ q0,] +ln.^0T)-c$CBż) tæ!r\*QۮsoZo0@"$" 0О+}dWgqq1Uˮ{񞻵 <ɦ=tGq/9^'E I`}pLMMRLZG % #cEnFߜ=i nCg&hu-Tի5cH#QsT)^PEK4 b٪$HI K6?jJ ?sg2itṢ$+Q-#gYݤwy5%mw; j,i,]OI4-9.Õj8m%$r`JɆ[ڸY^D~6kavRZjx$0c(!`.sGq}Do?6Co8919kfJ=&5ߢ~?˰cq-77M3:%V -ÛucֻcGܻccS`/jInJ9&h!ޝӕh)JkY5!ݘ guvc.LjujVZg#x<"QΩ&*M\o/Qp,,A@|M.K]aɂ:vA֙޸h@Q1-;? Em:٤TJ+ݔztjA\!*RwVB[(̈́[B\;H&#qaj՚.v>Dndy0ղ,ҔVtAX"*F/F[9D_FL< <.@ 6m/A2%!&6q5a[+,އCs𹃸fd[a0!.׵Y3 7(ָ0\ӛBp8iP2wbӡ |ӁϽOFJk5v\Sي*~bc7ܟ2tqI+t={F_*0Y#YFk:[ 莒4r&KK^ >XZm˲J,ElTKxLMt(G_O=Ch-W@eZ3\cBe@ &5'6ۨs-3at`fs ^L QyG4=@WZ \E&0aC( CXHߪm}ՊVjV^^>NoJ\ѻQ%C@Ú ? I Vh΃np[ɞ;ȋ?9p "Gc8u^,9'M#c^ώac;`;2Enf׮/ΰ1ǾI: /^ { y1bAC;\tئ䩣m%w&CgE3th6J7/yOwo_~ *mq`x%.ua$2c=ecWabxa<F0jv}:~|ۇB ܅1\0ëOƼo{v/:;HupޑEؑ~GT?[aG(wBpe{BtS #UPttWw3ro(ќWxznFfs$3ϳzv;vIB ?<)Qs3?|KI2>z_+04{yė׳ W=@x++ńo{t8 [ (.F~ܜNۯ@MGyօf`|B@*]= AI~:|_ٛ7[ށր†9oہ&ɿm{8!Jo~?m'w%|gnMUI`9FؼWLb1(;5)/\%f ί2#}އu7.$K97 φճiˎB=9'G^2=Mw44?;e"uWrp9Ft%?%4wz&C&kŽ,d1iz\:J>w P8?!BG!u8WpO8#0ε"G;{F'ܹ,~GC2긒܅q=ph25 4e5DA5.QUI~ߒt  ő =Y1axB!È/cgGfX k9ZvO1ч[㴱SQÇr: kt"~Rn-Ю~()IiVRs#E6J[_. K`mT썊HHf.@,YX$e1{+6xet Ċ;nQVg2SM L2.Z 8<L)'Cm\#C-&HPBv|PFv->B~wd?:}fȳ}UuY_tmXr]ki)k59|%=eX!8%nh>g"AbOɢ hGMJܼb(Ӛ4bTN"1ie{1/IҢ/dB`B]'RԌVT(D"7)Սm(X.EY& `TzCڰyXs.Z*|g_.'iS MEj](P@\MܿR/%Cݚ/VB~9 was. + mA fuW[>b;! ]>JsC?z;^+/RmM׊0:%A# ?p? 1No?}*i'~w_˳>|~qȫghnz{zI忏<u3vߒ>w"qvG>^]eV5l[ X?#  r[8zwfl>ݑLAE,X-}w,XOT&6h:^G'b3lU_Z' M 6 gpsų1|G7Yqrpxrp: ;+8?/0tbo6=|V<,yoxYD}Qo~ib\KY3h𥭎/?2 ʇ Zcp&uPLX۶48lu)5Z6L&c.d]ȌC mKOg? F~Ŀ|UfEխM/Gݻ4RDO5hA;ueSj|'i<,AF,bc.Qj3|J)\&2bSt?9'twm<^քm2xk5A**bㅑŢHce'fCȤ샰AʟF[Fc8By=M^ wA-e`r( F9q/N Bh֦蘰i?@I84ځ'Ta{H (t! 
!*t4Z )"W U%r;ч}9 l}uWOB7BDvΜ`!*CQN$.ݸrKeДDaV:EӭF4+j2HAQvlE܀Y]L)ʨ%O]TR-DP{4N0:1<\Vo5GouWgf\^k6߮wĭygzi'z꧿C2µ.xo^_pF fZ'H/^6tВۊ@&oCHF;ev؁{VBtoztͯa#0;ti VۇÐp^ 0hڡ3dY9 l<&Z tZ4iۗnk)E=k-[xͬ71HnΛivc4[g^;zAۿ_+;}f8{/k.-FF}u䭱/gMaղWZGS&-{md9m.٬v{:ƓH勝ıCpQ[NɄ@ .RR*`攂ӛ2WR(ώ)*"+ŷu&kG!N[=1\Jb1f#Lg.T-a P> (Ĭ8i6&ب9QON7m4jP\uza/rg c^6Wߕ¼+bLRzXZxō ōk7ONFza4RziJ:ɷ!b6>*<{JklE u&ej c' ~e!k l7S0w*_\V"o}ny a# V\Gm2rᗟ77.Mjnv\lСrwQx7?-8dZ$x&H/ZzoLׄgZ:֙s;pn=۷m:_~xݰc|-1ԙ[5ǿ5y{ ػ8n$W:;[$,O8v=rX:PbM]MTبԥPH$d} $2yIX//,AH83@̱rdQ3U(kg=!NF/8-+iq6xH'385T#̠z/3݅f3MB~8's4ĕP8K8IΗh>2xۂx%ąT\z|In! @/Mc@7& Zjih$܅$`8 #W-)l+KB\4݉.Tb!sQv~v՛FUjT ϥ"ͧ$׆̸n/n#VYˊZ R-}>$;~"& ⳚE*tG5Z}tK|F3'8];ˮ_\,Տ2/\aV8v/ kY.Fiv 9ʢӱ%ӻ W*2 r`Rk.F0CWIGHr7B]v܂2-Zp5f-cUg4y)* H<ޅ[ T-A:O\_kL,Nf|OBWK኱I;=FjEmR 6YL]V ]bkP,Ziޯ.>ӫ8ZN}5jD]-~y @ W@%D44#21O"ϻ(Z\s3D0JTC7Ҡ47`͕+zeꪮd'j:E+nxMOdրbcw2IFh&hJ2ە mhQ`ʭXHkcR&Fhڦ"Z qK@8Z+%$sjT{մf uaVřBeEJ{J=yrS_bȍr0%ǚg -dl:))<6k$r\eVFtV@ &5i qhhcul.wqo&_OfWmNX@WӼ8t`3RY/μd1:ŋ0CQJsޙ 93ή\`]I^k MaI 0&3hjʤ[m;5cEҮ-A$rcTőбJJdc|(~1}E@pЖ(SN -hi4:6y!vue **Wi*x☤H"L4aXr-MD DX =B,&}Ez*Xaht uX:Zߝvp7+gjnH$Lo 8K}OApN'!SGI'JVZZIh_`qpm$]\ӠUզƂ<]d՗߅^抔[o`(=No~IpP͆ߥ$+ѦNU؟LF$5pyL.{Lw=r=Sv(:ו0Fy|`9([YkGl2 r!2_KI3!M:]̎eeliomnҏhT2FRVc{mr7 <(S*x]x8rХ%BUD*/_5TɜhyWS7X*y8}6B63{j<[kpL_.sT Pn@Ƅ$JKf9g~+r֗3Yh=l^G>؜vsgb/Up&G۶ F;URxqX1nQ̞$Ԛ"el>|w&{!`n3BitlW*`ʠ4A\%W-X|.-Ql*N9}ЯD| ;[S.(9qפg4y\ʵ'+@Z*&u@kAYW~v35Suj kI.6K h/:@3B؉-pw'ߓ5na<'[KR[HQJ$2OBPD<̲1 ]xRt7qzR om{A/$IOh"d:4X(XYMd*&-!3U Ph7=jM$8"JX(q)F nW1s 3To穋ླ7J&NVx2 b.i[>.(>Sz#OٮE2WW]٫ XM)^0.Mf{˧(QZP)*hF[f@ 0+JQvO0=Q\?zdJK-QN#AEF 712IQ]-j !n*\!bqiO7.>|3ǝ2Fo I.I˥ wv5+Q {l׸yGF=M 1t<͗).SdG\g3 go|aՏO#pmjNT{ԌPLfo]gP7ӦBlVvT͸oЌ*ɩ$&h5Ļ$han WZoP͵"T5$=א@;Fͻ2 Jrxe-hF#!sTWz?C_A@k VqtT}OP[EX m֓4h~&hl`zoNyxe߀TET>t1Gw^e{V^r6ȸ0yz?~f*3ZqpOrWiVYX<ܚ%yR1-\[}:VOi&|Mˏ[r0IQ-.A:dGo _=|o$Wp>[.6/G]Yʬ2PrLU4Geh/\WxQ 4]_0hRb>v5^]-Ke ~ގi7T:F-Ȩ+*G bZ X(_KZZ݁%Tb7T,5.XRIZAW#rף'jTd:GF<]jTjԱB|VYSnH7Ir]'nU1Y:U("ZwDVxMAsNmNw(Z\ &VP6ќƩFE zU@[\Wۛ;{VC2zOtGl6~g'!~b~<A<(Hsu_,.s$'3Hs.޴#"Dor fh gYy T68GBg[X0jk~5O"UJj@CiBQ:~XaBqo8½8HQjh&96BQE3 .zw)2IbwdnxVJn~sBX3pS7;G5O4=ŗڇ7ž ϭrzg0ōR9zGȨw md>Yg3 y}`qAn$P9c;"sG&T4t=p^ ЂwMkU'8G!/tWN&]dSPqB'c9jL" .ðkajn:/". >bP O|ă!t9hSTpsIj{pBR~~#ɿBmgq;$6_nARQ")J>FPMöDr]]U]6rD2ﮢͥlwŃ#h/vz~uStQb.,_ FxC9q35rpAϸ0*SfM@D,H#zC[VS '/=_aAv6_coh L: V/ekjd+He ){DRZT_n~j%3t i>^!f Y {}ZF_?wAcDTHrQJ$ũ42:J^ySeu j4w|ZOx~C;%#gv\Q^dco_xBªiJ`pҝnC|߁8ꮍiJ {q|Ԗ+sTs@9SgV^hpz{;k._k'#XI[|MCĵ^Z4Kh<l\xۀaX7Y>n!A*Q>BJp9\mجl%mtنH0p gEj" 9:7Nd_`kM L)B5tCRe؁YrSaO]XN2DZчPdIZ]0>GZ|11: {ߘ6tp)2tOFiГ9Uj|H/ N p0De%-O;$C $f,m6j^K~"F?7۴No! QvGɷq*к*,fZ$7q(׆G홑=\RĮAj`-phvYfq8kO/ʫ9Ex77p41t X9h W5\fCSt}H{b׈~$4_gDZhF*=}w2=MtRFl0qZB|<٫ng剦>~';fۗ?8(茀75!^@a D +@\Kj7H4x\Dw xz_\ZFsIRWHYLO*|t $_40M5L5L򶲁. u0.d$ X*@Aeh缞OsJ?-wO)Z]']'ߝfx͝ ɵ=Ԕ򷿶 Bx~b-GH: 1cxLrI11oMteCy\PMħp }&}V] MXj/c]'$ƺN3+kXcaƼNUy=w8Y<8 Ӷ-`;=t狩3/^= s<PKX)Ʌ/ x4w&a> iA}kWڨ-m]i{L?GSEzJhL&rFrr+A[ɔ1.$lAmDnGD Zݭd~{rB2C 5AZY!1i͌f Ԍ;Y e= @9^uBP3(o!{VYʞ7(W/qKF0 o`u5c9FM9fa9i@8e'n0 *݁*j/f |qS&DrXJ%6=;y5#=qà! YNTA9(hGD0Zǘc@{yN`(r E5 H)7Ҟr MJDqh!CMԽ%˧DS6(Dcu떏z۠oXIEeN8퇓B{ ԊhpPQ. 
u͘釃yVG;:}[.Jf\^]f΁^X Ύ6dJ=#UNR$r&JTss,i+wm9{)ioնr44 (`} w\UJz=֫%kMӆKM0%)|vk.{# _?ِ("l7q(?{ƭcc/='HlE0`wmmv$A%ͨGjIdI dc ]bH`쥲{14\ge^gej/+g W棰׌Lu5^GM!xiīc!J!&'oFW[n1bz2J/`Hfuh|)!9^*\_:I{[ $D)!sNfuEKR"&k DT u͌D cՆYQ(Kej!wvX y{<[]pU|pS\mۻjq2uz=lϷף{i75Dv֮0΍pb5Hqݻޝ΢}KF*J9qd)Er{b>ZdMUG$9w{l_C)w(X*Yq?ln1OSnX-[09dʏy_z ",.Y"BRKӣ;y[``H AE|_g3 g^yǃ|׉ \b1ƨ{!H>o<hx?[@5ZlFDǹB{; 9mH=B@{5Z57z"c$zc$g:域n?/K#6'$Ae%wѮ7gA PSkF ٚB##̷ ^~ R>}7Wb (Ofz-e61(x#vG0~Jz55JJk'uv?[UL]l_gy3j}xvqF 22uvG[ y5ioֳp0 }cPI!}C(IEJp 8=SH a'RH|eݧO N;H ̟g &މ]T4U[r s^(>AHRŎ]n0̣)&g{ /@'ݝ:+}?iz}Fw>l˩0OF, V \/,!HB ;UK%><#r| pC@3 ?~qRʝXLn4c1_]ۏכ? ߶JO ]J-š~[aALoY.q}+q!!AKUbziXxz`0lyC4G,PWc 8oLO:iqSVqqtBٞ7l)<Z^driM]1,}A Oo9UF3U+V=]=U@D.BUޮR%U;뀫U|nﮭm%Zi޺ᤞ6{P~fWoٛif7n+&۝מR L4;)"i'Du fiJ)]Yp՛ Ԝ[Tr[1$dLNA1͓AatgTa6B fYI]3R۪QFҪ1UJ. #CJFJɵZi(LuނK˳̝GfKu Eh7[K6`fKAP]!El_9L< *cgKG1Ev&65N%g(lx;ReߖqԶLʁ>K]rA!EXʁL%!+ rИ Qo0DaM$m*`] ѶʁyE+^CXd>?<(zSH'?nqze^p}zGa&4úY 2.tUrTOWl4.hA!Um*B85Zْ#ΔMBLTT#ƢN#3s, bfKsmn+[|,l`GO b!7p0l yrtͻllST>8?$厠Zl3W7|\M ؤ9}Yط3*Ti0/PwUQZE H7 [c_E߬W7f3w,_~ϧo]IߡBVTUˍM. Ԉ"XrNڡ]CwC}O34s^QUQ'.ETA)J"&H,E.E j=bI;KW&W]ltV]);KTQ~en3@bue"Q~v~DC;tvBSE +s}RܬfS:ng[)Z. Dd,޶ ׫1%"~36%tG(d5"FĀ6pLǘQ*wQ09U;K)ek[oR~sy gw|P_Ż0fvw3+]֡b#7ȯ%l%RɈjߴt~KV0A1YqW;6=Jb"P /fV>Kn(\hh, %LŜԵjDS,]c  Ѣd>ZSs#C]/xAdQfi&'zFRw\}L4jvu@&IPҎPU'e?uvܼ9P88{iq̴Se%g̛va8_hp<.$É8{VC2RbFv}wyG="ڟ\1 go6eQq8 mޫ3!){/{x$sؾtyneUDVmT_GϘEϓBxH9uQ^c#yn]Xt=a <50A" r8CG+pj1\[QOg~ж;fh9ɠ]Z2Wˢa1Ԝ,sww[}_jU:bQr* z:SC'xv8T"S2z; mԷym;y ̨&:9}L{YܴՇzli.(|9KզEv6l'51񴻅ٙ!;\15mcm#mKq-(ڽN:IJ;8;I1*^)'E@h'Aۆq)(~gOpZ#>P"0/Ly%t<9kW#N|\臔x5cE\%09:^/9g4R<t!IJ/zA_rB`td̐6iKT">?Tqc\ xSp]뢴LFsɭ-yg l/3l,ݨ Rc.)wc/v2h y`yK} !9X"ڛ#7GZDZ9G=ŧc ۶JVn͖.B>e-E'U=ȵץշhQ].>~ |a`l7|Z^9ʹWεj'0V 4*(U,DXL-jD/ ,DƄL~sy[lvt ' 0y3W5. z|R.>Mu{2Lw|ctZYhD1ʬ҄P1Jy(2DkNӘxOIwf0xj` f vqGuP2^JF$rOM*Xd-.']mU;vdHIUwu ?Tx ǎjFW yFJ9*<ss_ $ 9b`Uκ"`͜hj.RÛ:ia%ړ#% nY|mʯ nPc=8czd\'t['H!5FsƄJ:%gZ7Fx=d;h}ڜ=$VHK*WOi/uM3!PwBx'/e'I+i{%a@0rbMR8D$bwDh",XxgTY>u1TFF1L/Zf]&SwߔHU&p& [Qhdq!MMk-9JṊzo[9Jp ыRυtMwmgB8vE ej^[_/Q&5>Jτd>zbX[98q@DP. 8d^x[+IBϞU3/u0tA| <":h8dt9Ќy퇽T%i'+u4''+YlN?`\ {r RI;b'Hf't h %_-G=/WMy=&ELssb~s4X'ߚxdWMZiHWm?xMx5Y Llp'Қϳqм XSQ;;+\ǟp;QZܜ,a%k^}AY{w|ؔ+(}^,Hlc VzFkDu1kՒ;ٚu*|.`uwgb8d%׸A=({tؔ(ZV1M*74=[EVzR܋v֘ʝhϒcڸ'p;CO쿧p /+`z ])=h !n09{hoNJݯNJk=餴^: EtxHʒt16dÎ4WJM]vD,ɚ0,c5>)vZst Fs:]^gs4V霻J/v?f c khtR/K䚦uHW<9]0B:d!gfU&W;V2>o<`휻:J;Kiu& Nd7#R+ųYa!O!7Zys+mwW+,P⥫ϥpYfkЧ.AQ{k3+.[:r?sN /"\qDA*ocXG@bB;,3NNd-wQn}+LM~<<~˫h_lt},c \vlVH1+ƪW<a6F ,ADsPX D\H~lQфb I $b擗/RSlU@`.:):0% 0i\`20J[ʼnQ1DpSaei!dD);Xff7kL4b"NS,̃*bS&lZ2FLpl֔V<:Nec ϯ lMQCȀ 0jru8F\Qz|txSd<~]Lz54]z;D=dPՆ?me~~ 3?XWbyżκ0[ߺLeF =ʋxNHs')ۋzt#lU/V}=\g0*J4;c^ I+Ywuv{kED-1J{@g"RAJ$3X #2H{ -ciius9|*+H{R aVjh@ fԥ&4Sbe1 ~/klׂ|zZjo{*?m:ȫr?'}=/+qZUIyh`3M3A BQp[)9Fxo1Ӿ^6os_ײ/ 5cr`S5](L%}zN\8$޵}bq]#%'K:;tyj<TMT҆nӎlG^bC6Zk+\uGv>&~fZmhil#=̶QTNGMs4fԛ4CXhHA-v)|)ky=,ih1Zn&z)u/nOXX}Laa9nVWȱs\[Po-{TR]y194bb{?MMՖoxm&}wZ +ʣί Y.~ E;.SߚxdWӲ|!ŨO\JZ݂烲?"70,7w:~ÁWH!R`Ƌi#3,trS ?'Wr-*&"dZwZ}9;ɰP {Bq E i|A"gRkRYy?xXaEzZڰv{yh1v1-<KɝZw:N=Lm{ze[K5t[Zytڋ3A[N7!WצYXkFɱ%e WpU%jw\,=mhw7"lyj4jK+ٴ63RD1i˥jӵ:USkH .hA E{mN U ubB~{ZI{aV襄\ 1M[~0_~jC]Kω/S)L"łQ ͳT.ZOj#a+-)8D#3&bXT ^i)bzʂ@n_uB[NI:UV1w߇\J Uouc{ތooH>~z= }п ~ "䦽B4WhPjҷ=|fz56p^-f]"v%.fyxB{}ʳTA+äfJI"_3|^ SKpq?BCoӏ8+ߌǣOkCA;ۃ/Ä3Sl8'Rb3ذ?\_ûֽ/Fw{>\*3t ?3eɣmڷolۣ3ػ8rW~ Ny"iOMA99O WIky,K#{CΌ\ӗ0,KiX,ɪ EY'][UvMIP\)imgMO^ij8W,WinkD>cXԀ~ۇ0dNBzjbܟsAl⅊yO# j s4]3oLO9 {s" _OsMbY;Ջo޾ɢ K* mWeEL +7+ߒp3^T@?":С$斷I\2V.(% s1\ʉ(hh1HdAqoO.T' w5(?'6$T∴ZA,Ǔ=QHh%  h,7*y)Ջrn0^;IV UiP bS%rirh?N?n⍛M E+5ԢB CS U@LYc:gU^zP2b7̓ʳ+9Y7:4\*|@YnN[;!o3kLDIӶi~Tpu3XqKdJҼ:DOtPF.! 
V%׀3xI^)RbipGMOd{0JJ)Z_;/PKT)a?Q1pyCVcM 5_ܓq%ɣǰw䆗7FW<(/PE./Ld yKMqz$To$J<02);Ry6rFA\\%A*wE`KN v.%(e~ĕyq:K%c"SVޒϛ]-Rf&Wiz ͞ŚQYQ 4u@+V'6P^^q7worT{K*K^ h$K;ƍ*(yL|72{JFZJk,B?m hO(l]^yE[ٵσ\ځ>DuD1Jto-=V[O|w^ZI 0 Oyz 3&z)"pWOA$?P,Bu0):d_,[*yK' Vr懃X|2yLa^pi 0o(e+pOʕp* 4"O NıNSG* i yM2ȸxjr'ji_Nֲ#4Ġ>_ 3pO8$N `+}Avb5/{ Bߊ8Xڼ cŹJ>A$*3 6R&vH7jdZ$S爋s(}4콭\>F]مĹ 44qA;7ӛY+5)s{Ȏ~}{(t$>[|Ԟhe-vppN (i#i XKj$AW)孹7Ikl_}īi+8%1UIMuװ&.K= x󗙒VYa(vE?y/]qCH̶[+6Rf3} f<6QKACe8"Rf>4Q ]TNXd*찈sH4Hd6"hoy}$ u Y!:Y Ie7T w%Z##fA2[Jۖ6cøpɄI 7 gbk ;Jiy:Rꍴt'$CLl2[2~Jnr1'gs~rlMtLǻs/NMSwfvۯ}lmB+Jvל>_YhY;^KIESg~ۼzVd>X\&;󗜈̭ܓMcabTFepo\o,o>W" hh QrA$ɥ飑Y%B%!LR==Üww?]U6na~=`N>+}Hټ:6?\Β s!$|愆S2Y.s2Y.A Jż"rJѠX-S["(Or+WCmC\h&8RH\"JzB*tF2g. +FaNLTjFGPj#ꇁ['@/u^B/Bo_.xEF)_ʷZɅeJy[I vnĒ\_dD%:g>JJ6Fih !0oA&gĘTF%eȒ Tݏ5k)kj _+hkm ni!RfG_oF k-8Gi)e ^sj%ѤEh#h7 d2ݩNy6wE* g8F Ɖ5\l{ɪq-Ӏ$426䨴EfT\GXRiN & -KjF1p-=>雧$l7i5<%pl916.DN_\ݫ[3uڵu-D:YO^ )e2߮8g>.ȾD d~uFsSyS& =,]yJ&ǰ2ʊV6MSwu$O7']'WA"^q}ڨgf\4<&sUߒѩN}.op#C{uxqVD Ǩ@+((QZ/*;8{tA%/K:UqÅѱ}[tunf p' p)Z Uf 9;Fh!/N#|-t//[ ,: R):xA.8Ha;ΙYOFH~Dȝ OQ񛆦Df΅"&pmb'`rixSXD_ !Z F_۾W=g09.b{mzAU]}I"]#*|v o}q䘷+ߡ @Ǚ/٘7n{Y_$jW.hrB1>:=/ |=gԓ6 ]+,֏bDݶۅcG5ۚ}`QگlɂS:. oJ!V.>֬@o)՞ZҪъovF+%}gҔͿI/eolYƖUU4) 4Dv?ql`H )I ]䒎iuGG_sP%(vo2]>.Fǁ _>-Ff6J\s3{ r\<@>t9|TPolZA*!n(̘TBp* MY0G8S$' Hi^hY g}Q|t Wr//nl%rVrʇLΊL`<5ijXe6Z^pAN:( YJ єY>B_$/wٕ-Xe`캩.9jԯt%@Kc2$JK!hʲB`24@\0!)=y~zT<Ԝ jxlO҃5i W!c¯~?w>>]8I5F8?>߃ ~BWO˿'ŝg(8=^$#B~zlkeA_pǺA5#3Ga`XJĦQq _+QzB% pxZh)޺AaR>Lnc֫+|uZ 6|xUKo7*8T:2O @`yzc0b"INQ]I E$L Rȧb 9v)8G@ NjiEpNUw'9@w&tM^kªnfi6mx4sgmz@!`۞SyV*MRgin>pՓ/iG?iŧ2?h=.?d/G^->`"ÝIw^}~g'V ab|vE܊dZ&ٴ)d^lW+WGWZyt@U4Av51~"{Jpb@ c%JEBBy De}4OW#?Y<'9sk^B;B_#T%4hu9U͊q(wr[šq F .3h׈)'=Kj*7ם;CS% MB '-\\>^@9e6e]nxG!WuJDҝ{[oLV!m+|loC5Ѽk'{q5/ %鰰IKng:=o*! OڡYvBBݕ=<ݤu.{~sZOF>Ŕ8zӝ/z2Q!Yw=4wz2`:e;w%"ixNj2Fcp{J]I j[t(vJR WĹąπ݌O\ٌXYBcEk׈jLm~WgyX[7('H1ƵmBщlv2ɱVOåd:[MYi {  a-SEl2݊$O =FZabɪ\`Kbӵ(]dD:DSD#/k휑54,;mϜiۗw-AgMl/وB[sy6Zߞ8ϳnޖLc9Ě )*j ʌ F*sL˜TgRk p3Qf I!T?xg;yrںv qU"X#bxH'=ɥN[^_vKSgۋd$O)a,㉂eRҺ1Ȣ3ME\dga^a˧90EL,Q+laR*Cg>`dgԜ¡:TjXT)E9Mijs$sX2pE)Fu հ|ؕ "dp԰Rs8|\'l$OHGPQ1J%yJwlN%mUi';eݱi("sğ׃1X{9,)7m"J"Ђk3)~ &5S-0p-[QTvǁrjǍɅaˉc br-d9NscorDuAeЁo uK߶H<5 ̕7 E NkJmnS|QpwL;&;8C]j8*[]R/r/=Zpd⸻U-[`HučKљ'r ,zpŅKJAظp_B`-)]২-/fԙn K$~*jaep5ޚ< Z|.:'&H7;jK j/U@jJ]TA)t"em[J=p_:5'ѾDj`f>gAW%8}o |JPl?5%4vUDn:9$BB)NX\S[sHdK-i /߷7YXHri a~i߯*)e-S(il⒔Ʃx,(|)7^(LSIQi5+CG+E!-ҵ4LFJhѻW4 )2aִfJ9ĤA&%Pkk:L0 ?M28ef^|X+޺kF._dUG0[~K܍! 
?6gxB>̞B&h''#6,PHtpc5BxQL#Ēu&-1](b?kdIƂXk{2'ZuD jHNCバ}r^Xyh]yɷ~`p{Q8-g6b.sMba'bufX+tt!ni`ȶî T-Ik(iyJ~ozrivYQONitn`LmRD"fy/ՀOCV[˃lws:=/Tb"$a(W.uLyÓw|=+"ZmŠCX ׉-txQ% Dok#lAgrwAuPjыT+`k%di=570L߁;FVB*˥|S!ATB=O'[ EP˹ᗼF5w(̉Kx]Y{1TZl9 (y]prAk %::Q$B;zk֩wexۮePE!/1#C(4\.fڻ;:( a^Xb8@~|>,9"/ZKĂPFz :*H/dmHvW_CKP@Zi,k]P|2CCoDt lf0 &'jB"|b~߶Onb:hǍL+ +;U8]A5&EIJEeܧSe\Vm$&pK>tk(N⛽nw};݇*SW*sU[8 9𯙣w|gYSUo!V|ʸpNiCYRN4:7wZ:1 HΤKhHRUk\;Ғ`H)힞j@t_nۢO2Ri Z6TX) fs<2bV3Jƴdu哏Kt/qܫrY8 ݵE4Rb߳S-P.*%tD.aM;:| vTn,yYajԏy43u&L7̘I8LswN&(P͟">L_{uq6>x 6DՕK5se8H~к3P\U-X=MFsqr wh-{ nI[ݱ8 ۪p nF^v܅#Q)K϶x hy% ML8[ukVid#M9s4 o9ǟ&(\)3I0 =~Cc w{;Op?,]@?IRSCgRie !Rډ/q8c/[қ.7%끏O-\ObB.MR}@T?1hp[$dH "ΣM\GU&;\0'}>MqKx8(0 {%&v:WwO;{TEi27I&WA="̘uv&帴֊o/(((J_.Jç?!4O>b^⏝cXcuj:SKе|Cd@Ҋ:[r _n[UCĴ;c:|z\SRc+J%9t@PنD Q2.)Us(ۗڂD#9͑QR4 UNLOhÏ5s) Ŝ'gxqI Cډ7W\diŒGroqkSE7؂J%yk~zN?,RK '1ҬsX*āD02k!*?j67`Qn `Qn խnAay䖶13z& |T( 22 HMA5@9ѡH@6I`t[Fhh;ْ]y+and*„PY(YvPˆ^Y$$mI,0IJ)q"6k0(CeuGL"՜ztDL6=}G+<*$2:pnur 9᜹=2Uw ZRsKwAk9#)TaB(+@}(BJal<Ю "F<)(s2Y:+rd4xd3!t;xc}1Hd6D*…WrVGA)+ ?,aʚ91wW압<1D\=V}\-ѭU8K2Cpg@+Õ~RD0G!ޏ_7g{wޟIE/ F,`~J!o߹_gss;/ \ET(I(on2I/9(5ɏك>bPJ(!i 8h9S(8iӂ%Zt׉-b_{z.hZ9s%ڴ=2@8xWo_eT$ug{}0Gw4WiGu_ x?>`j>tr4"[!H עuk'`3jmڠ2T NhzA _o(" //dblLAj|lG ŠΙcnXoDyYX,|DSM^& 䈸$8_@?Q[ qwNBp R~޴#69#T*qpKMkp/m~!߈ef.CNiW·i/ մ[]׿ KG,y&î,yn Y.:]UjAɃ6;l+ ']; q2QŨxfRޜƾ%чۯ6-8ʈݏ8z]\Y+ywa8\c?Ϭ@/Ѿ{|GPv؄Нip:LZWd;9k5㽫ټtӋ6$Slx Ut,!v<~U8L.i>g  Dn:4Z6.l5Qu; otQ. |NWW۪j]_.*8!isMFMkp0\w,px \7*{kM˲m80"NjwNROe4PgK*ch/: >yjH{ lhUNIAcwj QRѶj61Ri6CW2snZ-?ޓ"aoA%zm2RUrl*.DRh2Z//Ѫ_!%4^q*j <%HDNzpG ‚h >k݉j5~;k4e̔_yI^~RVJJ[)VJU~-p1.ȓҮ]GJqiI:MM$hV[wZ[w|a^>@c`wduڜu۵ jJ4Oc-g2P5j2_: `2Cy=ؤR [Z6S&qhh-t(^ +U!BxB6M^j"1@C6$`c?FNJatpS{C1L99a9-X+ct2c!y5 uFP =]MWC.zӿ]x|Qhh]l4 J%f[RF*QȦHVfV((g ZQq.,hd=PiVI6Lw?=}(WW0<07_楊.wx!Gh6hz3!b%bK[B{(U-\ަ gT(Ilol5f0Xc{X+U뷞jgeʏGr^{`=qo.rN/ҳ/>+YVVwhaGк M;ۤ2_e_El͍~gf#`dT@MUht\ roգ;F!D6 w(젻l*HK()Q1 (ITYr9j1b$k>|!,ZSA gJI1Y DV}#/ByZpie5s1Dw5:=]%t ū KjiGTI؞C91to^V-HbἜ+/,YD[SZ]u4jGdɀ dd-c⎆#0HԒ49to]գ୴:(4gٻ޶qfW|s=CN7ɶvIwDQZvl %_[,Ǘص d>Cg1B 5'YzxS%y(h m^ӽsmԉ?܎%=t'5CsٲBm~x`w7X&`9K'zfƙt/A6]Gm@2 N4bNЫy绿@0F Od@d!B cܳPiE^3U^C?'z9[Щ' IAgT\d£8Jvu%_*{@ 9O HHVi͗] kǠe$ۺF[w~>{w{,8zwU\T`=j?/cBĬ_l8r2ΘGIk)T2'^l᢭g0;L('ݻx~+_?(vu@~nzUEŖܠ( YPz1A!3 i z KISz_1*;Tj ˋPx}?\m 7GEkpJjQ`ʛB&|[c<Юrʨv34P`"> ?/N8QD8 _6rBu,˭Bܙes9&KfÄZԖ_Hx=c W8R[P%Ϛ d|o7BmH۶n엜piu_ /QJLd+b:~x3b-m[H3M%=+ j[Z 7=R ~ rA^uǟRt+"Fkg<4UA/]5웡t[VMiɵҗ\`E \kt#s̀݌!dє>ERIl%Rp"YIS5\BBBb44u͑ CTRz2pQ]F!+ d bBP"O> =}WQSЕL\b{(d)zHA]:.rG0PSB( &1l,q c' 13Д:C,ޢ`X}ĸړe&ךxXaFPG: %a SW=s 1Z@quԓNҁw;w?_;㝧\,|yQt( t |PI? 
)Ir`gN7n=N 4{/#R %HAI1 3궻P0Rs )Ũ0kBߛDKdh4}Wk{T5,G;JuH0QQG :E}caT7rU zݎfHͻtک[_{w&T_~[nDY^SK[`*ǕS#Dõn{2lw;Ȫ?> "\Q!q ;jEG]xk-&e@.SgL NdVֆʦc!guA\P4(OoB;L\mm>&^ۓ뷇{5XGBF\SqDξJ2bڏBq"-"##gS2RA.0ґ xF2P_7ùֈ) ߟś'7=2MR䱷'7=qbvb\ޜ~Cܜ=P_/߽;޳WޞgۛﴻfaHu|msC"yzǝovL[Ӻ۞ܿxܠG]>忧eC;^6ꏵЫ'0Dquxze͖wdF~=ނn;̠ ƽxCR0H 'Gtwpv#[G]ϮnL>no d̈QgPpIYc"|RMbl,Uq'ąI؍Sq=Я~_= ;?Ak˳Кvߝ 2 ō˖'Zm$̮7@'F!{+d^o~<^=ûMmAݘ"4uʜ?F䩁} ʅqݓV}l{xp͟o0tA {-(g 2&홱k8'r]$אo{҆ΙVzV~,V3`X#'չ9M?q*ĠFLyXBen.!4Lᦝ A6]?Ke+ɗՂW8>0Aj b p/i ᧃJqZt /02 -- Tm̔O>Rٴ}:c (.q&Jg[->8|teB8(2|RL8K_Z>Y>!-Է& Sg`Wku2X1,b>Ƌo ] Dƌ5|(#J.AJGA$8Ϛ7A=T.}fngC& bV.q:fQ\ջwqcXf *gS9X˵s`&oe?Ǥ+X/l846xslp2B` mr3Llco[|a -c,Wf+sKI]Rq¸{iE_w4lBd^_e/d)߷lT9kY /vͥb}ˁ#lWV*PؼWg(<& "\^Z>z];7l7Ѵ{`-+nǥ5M@2jsx>t_4u6qaoCD ڄY|""m67A@#F4@Ҹ4/E$yT$*'H-:f{y׵]mg UO>Taױ _K,UN!\ק(=jnmhOһϦ |R?Sn֗OInK?JMkAs&Yct]|{~J?oS ߮/P'$͘4VZIzDLwG.މGbnNK͇*g=^[.HjN;̒ )7Q$Q_DQ[$f_KFZO߼CW~pC{Y5̉W/Dƈw3;ywC0|Va(^䚋N&^D9^CPA|_a0D9WZj~\*99Vmlёxo aK#-$þ1 t-8[نLVxVE.SbNb;/'sI <5Ht1E 'gg/;;} 4@ҭnޡ@];>惁nu \H2\bI3I>ٲe 767Yk*I⻆50[7ۏyFfȚ[ H#d&I$ʰntoR"$uR'@Hk=[?n:53nӷKyΐ҃!ehTTehHhk&y=!YǫMr KIVqN{_+/Aux.ei^y XnHxZ)+Ϥ"*ȝsQE*zR+pc9x3={|al|Y4 C9Nn/(ƍY^v#Rqd|;xX!x2H 2hМBr"gjfȚxG2G"mkA}^O<6Va/ O0"faŸ>(u# <8‹m  axЧJ` $4PPM]d0LJ}W~ .zQtXvjc)1l@nl-YR.4WY*t?J괇wjiyjZ>zD.iu6ʞFB@K,:eqZvU-S'Q`D]h1Qh:;Z=hCE#i9ԮQ8$8cBP\pj?5 ͚F:(ltQ`^gcq@6z&#v@b$d=T8Fy29qJ5Vi0(Pl?sFm򍖂 ֘Lϋ / jwبI:DR$U",l ׆K1aBqȕ︘Ma `P I+}?S\ to%p_D,01:!D+X N*# Z*:%2 k7[^&9Q?\3klNF]y!iՕ_͇7Wo\Ŭ/b`;DE߿DD[(*|_B!ٔB[}rcY",kD"`S3Yݠޗ*5^JZwi"jXsB5>]̾@B188o% qXjfu 7umR`껟H|a!XTZMe'Jon* ) 51_.!R5ޝ a6rg7sFol裰:ƆF hZ&~&'v 5KKIz<~%8 1\%RIr$!Ҍd3V"(^J*RR)!D\1`B6bX c"#SHR*#q~ҭr5(a1zoڜᕐuv ׶$ˆ9F`yY1+]Vlmڮb9Hbk֔R:cӎtUrz5+RzuIMG@∍o@ yRSc0 8Eldf}Ys 0eXS5UN[yn q X_5نW_3p;Mm$?dޝvՋf3iIs~YvIە74W^6i__ͅ&> (M9a`H hH']oK Fh&fݮЧ@}nL# Dq$Ws(hAnE"7X[(!„Ȥ4a8WQ@N$ س(rީ(9xN9ڈ:߳w~ڑ1"{ԵE>7/"D|ѷ!3 ,zkbfa{hǷ~\ˀA"h#4-!|A/o~FTp ̄PC~(>`f@ P)7"|6`#J[=͘0c/_ }9?lo_"GJ/KF\}=v+oۖdofJ0) Iܤ~rb!崽^]r Cxmxŗ ZmW6qw}e9|^b ?Q>SaM Ko>v ÇwܓջxwBc< k;J?߄4]o?Fwv:nLm65?{Kz5 Lab}6S oD1@`aH>][Pp3 )tI!(!$e&NH8vB7sCSҊ7lzwp9BMBRm8h|CGC?tPe8fаCf!tF-Ա܏ Q耻bgN |QƵ/'@H}526P``@zirEѯfUx.6EgǙvW ݀^<E=LgX.EwBHeAB'BYp 5Q:*Y#Xy7JVMe#dU)޽.>ʆrAbDz?zI JYP!aRmDiŅ-jșUBU aN8XTv4#J!e:a᤼8 RUrsZBTC4$F 9, 8By`^ ?|5+x2#ydO~_8)XKqђ#8ihӑy]>'j_rTP^+{ҢAM;; 0xͰ^ۡ^~3B- &8 JTXf\5ߧK Ȍ),"p4c @"z6=zVj{&C*|BQ!ybxCDp̯4 -2-?&>z|\xƅ_÷δWSx*1h aխGXrg|q%YybYf#^[[{˗f2+2Vԗ!˶]YL%n4-k0$ah" [G5̎TBm|2 i"tYR`R%&˵AC^;'kG%L3GE!ZJu&"Q/DhQ ;;v{! g~՘; <@Ѯ8k1R+nR8J[Fa r"ɜ>NSEKM1hGReR22AVtu?H@wQ1S4el/A-a(d˂SrA~DzӍI os.Nx8)tPR-kd˼X5= PQ'o.dOZlʓRĄ X+z!$| ճۻ/:1/عEyCLbִqX*)RMو~O_`lDD)aӝtNjM ?7&^:U3/e>HO'*ɓW/V5 +2Sn$GDUYȭ]lvo&&v:[:y=+$&$I8ɡ?P)c ȴv~Q@?mT2aJ,;l&+A"tktgYr8y|?Go3U|BH4GڂD]GQJ8L6+_F![ ч1ϘG*{SMJ6BWVGI(ɾQB)&H=uQȶՎ2~Y*٘&W;N%m<%?ʼnRӎg GH' XBIʄI Qq׻.+ڷ~ T D#4酡CczQcmF%@;=xVَ yв?vd\lݸc W4s-~{5$vE}ؾ@oݻ{WW0 l}o+]R B* [?wDM&wTf1 Jm/t9‹7WiAoݛshm|dzΏO>*|dDY1R~dhXE z:$/Y!,Z+@;MR9Xצ+`hHwhOΰy {3?6k3į=iH5'n _fgf[x蜨lh*?.,`@C"BP Hd=q$ĩ{\W$us|yIJ5(!)Y>=E.`A ,v{랞E-7:O}mm3:j4R,1A YՎ}.`<3hH JA<҆>hHMX66GsFe (RݳACX0F! /| )6WmK d˭yEq̩f2lJ붡dA!' n*$7Lgto~벀)Htu_2\̫7L=a2,S„NdJX*udCTD_e *dkL)uJѺq} MjU,{t M98QXԃ5V{02UK(8V!!+51YT tE lyNiAMBRn5*ZNX:Z&giy"g#h:;4$H%TjIIiQ s]m U(wBn=BŃʖAGsgp9\{j;؇Kv͓o>Uguuܕ(,M ,gZx,W(͒@Y7&r.AЏyjȺL|4l6VE(n63bIך8,cIGCAu: xgjs9l%M-\f%ŧPf/h5I68 97FI昲]¤ l\>Uς2J-6K9q$)TF7&N;zk64F;WqQ5ez~,ӵuGPWE-%-JҩAHHW"d/PL͹ϫSTfմ n$"s|VI۷ܼbB-0eRZa'@ȖJf$.00]K[r{닆kǚO&bj8 \1A{"`Lu \2ϒ@*(oAfN+Q⚇MTK !.YNoRH(ϊ@.s]ȥ&U`COMCm0Xp<.Bҵ@F#}ݬS[ZU\2k7O8T Q;#j+#GcQR)@IpV}R dSg`Ϝ. 
c&ɊJ93F$%k}}b3ξN~Kg>3i|K}TF1N<viM/:V הSIȴ`Ht(~G68Rnȝd/1qkSǫ{foP+5n/1 +isXֺK =-|f=xkƋVۻnWu.446ATR^ ] 1a5 RB )ټۏW.N\"-!nڵͩ$X76( -8h ,4RݰJ8f苪&ivQJL1bU/|WL\U3f0d߆UW o>oC<1Bq -".nJL;\D.:u'5da2  .kfB:D<܌SBBbzF~l/g-<3RnVBm) Ƙ)cѸ)cY pߙ~ 8kLՇq no2)wQz^ q2P}#xSUNhۅŖ]@S\5 ䷋0AoA'ur]^)\Nn+f)mmZQ-~(8򿲿}sU{MޗQ|yO79n4>< 1./j[}b,/~nutM *nZˏ|%)Ѿ$՜HyFf뫋&?ɟV1G32ct{ dď:;4ZO/loK#4sҹW[ͮAyP'nʻ;FSt< RHANLy*ٕc(wf1~p8Z0=)0j\;!3N&N|O )Sԓ`,FUՔ+:Ju*9jjуnKA,4Vg(ؔyD*E2I"LR +”v)HOXwQXg1օ% hNm 8 Rשףn;)R/i.h]V99pVS~ "-T!}oW7;ttxLib [,Gm㶧lRLZ-*fV"jmSĶŝ ۜ趻sSX[wzg '4a fp3Qj0}lx3Nٌ-L]^{Ն_iu͌7%m^z\m0ϳiilF(eV9VޒU;pp1 "/} PXmzL"M/f>qu}^)~ y9gLWM6r:ճbTgC(*(&`1 /$rJ1*$# 0dasyx=3"L݀sZS[UW[5o < Fdu}r5{_3?y-?BZ1%;bxm(K)<^ܳn'io̟ ;01͵ {~!x:MU?SwwwWcŮbH1sЙ-̐0U!w r{yah_E-#.5& RHH4}Y_PFhI>8byW3eor-]cy]ͭɨf.\*+{}ew,ǚ˟ք^5xノ~$p뮟ÄwIl՟|\:˺Q 9mWJ#qɕHj޵dٿ"ˠCEm vt{t#(&tv}oILITcÉm<}T{LJVHmYjnn$.edňeiNq}"㹇GuaMFáӤO/FȿNp1c ,4b 09)~>bZO d)Mgts`\~$kXXpt4~*z*A!z]T rl9@ ;률OͰee< Z5DB;IGsW _aܘo4htYfʵh+KB^Zh{s>e &U% i~}%8Vf{*hM*s]3?Wo-r%p+ej}T9spUZΕL+{sq?o=K\}Xߓ~2dq*TLZ-EF"#FYޓ^q>/G7W^ `,%G|XL77]cAnAz}<1,7p|cMw] ;6 h _Dg9wWeFP):< A2iP$Y ̦*Fv"hDD@"5Ze|: ⿶E0g35V30W(_֥:;=hZnCкͷQ Z&~~]ǟ$Jyb6Q$XkT1I*8$a#-e M$M_}>pEfX8٪9ہb0KX[= T_g./`T~C@_Jcj}\NXp#g<`2ޯЧ A?:57W7fv37O}rg_Uxy-%3Ք|g{ҷo|cdժhrd+DP.!AN!/%X9]ʥ3p$V3i-1& FU5R/UUaӠ2Izcf-H7 B;ެxBjs3k*,kEeHq^sMǜRE)_U8\=B9gO JP5bqbmtJNsʜ@ 46FkC(q8%2ƚ,8VL 9L +b%\e4ҠY:1q`W~\*4yvP.H$R`o(/@As+!0pN4ibR5y ΩU6&J4V*Av*UܛzU%/yY+Ns ?L3Z [w֒v]+X Xopw La86\ bځ!&T#ss Uՠ#63 Έ hԦ,P`v &\MkE "I)*9d.FB0l4=;T-@Wΰ˜sSLtWna\HY!v偂ke6dE̶ Vz[M Y%=ۉCr @>z'8g_sJs:ڒo u.ZE]k3;5[KU`!2jVkV&$ˈ!"42L6\X;\0ƉVRkNESBX:S8TIÚh̉iڴYP NfKAp̄yRL@1yYsx~,//7V-B1E=&cc҆$ (5h@RUVL ;g߿}{&e2fQhE.Y|y-sHw{bJt1SEa9aaF05=y6b#{x9s|pχwc_,P/IJtdfy( 9dAIcMtU$c˜ɹA_~ڳY^(g9вz˙%:oSM2P790GI},-h\OG`)m*5yK_gmq#)XЃIE૛_R@6Xͪ jTo}Z(Di<"Re; ~Yk4׈rM9S~uaoMFkWt+NZ :1Hqd =UVsʃ#fSITRaI cioyoU3dnSmXn KFx1VwPmb4/|V*6:ѩTOI͔k؍xD )dV p1 Ut-{نJ4 QѨ!nz QјM(ѸyJz0 2ӊԽ&:n{S$*8H5m\Sꗰ+u|-e c4s+,8T4PCC4/P'Ҍ,H"4 h%"PIfvw BOީ4 ӔBx`#3J,ģ?Z̚nOFqknTzŶ4E;my߀--g~Ln8}pfT=[8{xX QKTΐA+%3סH:0,W #橒 \ SegZ*_HfQBb [vѰ<L~\+1L8I:(5HIca5ܕ`'&ep*tcnbO;8Z"$H# "8 FQU<skiÜ㑏-ölYYϞU,m<_LKݍUA"I |;P}kiԤۉUd@~bRdX}y6b24rf0.mAoS9CN9J#锺vZm۠[eyTa>V^ѭKaӛn1NϾ7vH]sI377kiB/emoR$;|<#]ӝUp (yly\3'7<55&HҲ 2L3s"Ԅ:}BIGV~vJGS'O+.n{mvs:=i}udQW7 ̈́1!$wvqms?Yv *6yw|1[N fa:L";ZxЬz4z<# },}Fu5!k5[W9pT6 i[f{YUKdAu, 2+\]c]S2j _ MiAumO`@^Tvfсv{ϑ PHVB5Ĝk5`4ZFl]fY،RAX5#tB&D_ c*}& 5ҨKIB$%ۃ)yRkLba:2rd)D;DheFPȳ7I Ti/X;O9v@徲CK|7h)8HA"@)pJs $iqب$ <8["Ƿu j[Ow+EF*:|lDTu04Nừ\F]jbS=g&*:PK F<{Of$nngښƑ_QlcW)ə:S5n@L4ۑdO[OCuER$;$5hv7z |3m(~iJBgXLj'WY!XBQRԠӠ,{*s{*ʑL04cX -ta%U)ZjJD.T Z8 ZDÝ4!;VT%(N|z"i*̅r706uw>/,Vݝ JDvtp}󔲮QlZ3AL-gn[1Sˮ~|pĸh6ŤdwAZXKM4+A)R[=0jTAtWVO7:pwSR|ܮǓaԥ9kH oqvf`}F{m> nٹIGg@h'aIDUe%!Ri+G)-Jrxݚ26 K@/A^@Le}+5?N.Kfo L. 
W]H(AA+Y:oHb7(#ܭo/83BjƊFZRj#4^ QjY6ߜ}> U4^{yZ)߾|SЧ_y·J&s+™VaLM@TZ9!=B[=!'z:h#]Qy [T.//ͫغ0ی6"XNѧ\몮oStfһ xݜ PQ2j\V8~`5ɅG?(z/Ct0PcptgwI )#[+/њcM&hw ^udFx@|}ϾN~kϭo\# T}9-DSyXFR hvS 1P``s e-+rNfeBiFLF&|z?*`Ƌqo.J\9TX,7<)D)O=[#|OQU=]yLO^OaFd[& jPnM>a_keK.h5\܇OS{Lq(o6ΟӬ'  YI8p5j82G}Oa:\KW7eyNU2{<&vYbbl/aɖ Rj&LX>s {k*{,<d)ןr͍L&-خIBӛ<&emR!=ܶ~pcvɳ^ pFw>UĶ;VŶiՒԞT~ؤM369p3f9'2]{(nƷ_gOƁTVx-(YnCGTyf3<4&E/gv|v9n}ѳ+=ܻK--`]1xux Ԥ4 ^CsڦQM8$] ThZ4=Q7N؏nhk%U+d\YbqGX?AKo 9xN:⩠ `_YrxAɉRC$L\&{ 2('B&\`Pe:B;PI\1AA4H%:ErUX*Y XQYYs8U}:flfFn}O~8|z4nAI?R#|MpA@ K=Nw A}֍'X]WZ$h\TTW (1R4բ[^*"0)r"wL( p֠q@8Zce9zZp)eKERZG U ^@lb+ꀯytts-pÆ`&(Xayn8S*|8UU"PXm,Po&u %.b" S,P!oQ.Lʨܩ`B-(MY:T.JXNkmhRqsj4Á"zz#񋔙p$1Hd!Wx»P%N<H*XaA8׆)y9n16J(aI ׹ AqPFz\#x$ ̗1^EX{H.W UC_.@%ro|u)EzknB28nSd>n=}iV4(7W<ŲgqgqwŗisʅjQGRM _ry] VF5HIx(ēwa ɔ M WnH-x{5TT R1de#۔Kuu{_C`Cayk{\nKhSF%Z*E (3d&74ӢЊYE6R.tIcKm7t Ԡv8 .oF<@ Uku1 gK?|ẗ́<Ddbz,b)--9P [%@)53 t[& K-S0%_aYOڕ-,q9tKD?!*O,sVR]'iJB M!Ŝ |-we-,i# \O/WSw]MIc!F\,݃$ih=^QzzCr2Z(@JTMR4;x=ɞs@̰=|i̊{1xk3=NyHIA{W'Tú;d61-3r]$bRHCJ)Y%UdbtU1#xGNLP[d΍)ȯ )7Ʃk*zWXUеͣ:81i`~BI]QC!ֈBtv->AqډJtDe7JAO(ֆ{H fd3↵Ǩ#rT%%ЕE*sKVNڄf8Il=oi^}g+Mr}aКQJ\#swW;ڎK#R A0PDLo{[?o6e9-fhyWjkҎ6(S{,tUAWa噖Cs̒3 I:$WQtiEM=>sr9zUqźZȔb0IoSH4-P;?bm3g(vJm?+fC?Tm9j2 \6cQ$8wlYҵ:#555%u7: aU@3HlE;Ezlks'\pz똰g|pca@Lű.1 #%G Q$>/VF 㑛%,@b?Ԣh5`?9!fEP h y ďH/4»PৃǔGRYςȽ@A8ވ!|8Fw7>\:߅'rpr,D!?^{yd? ~{?BlVA#n}}ԋX1ǹ2&CfP"_է.`"P L(\qYDNAzD$c([)󜸢Wy~p)$aN;O:2OH"/,@W,p74e 2KBS7P~1DLq=\ 7<= 9`(cyPRƟ"_E\U|h+l^Kp_rz1"ݍpps7_''ggĂ-Up1<L3p?#|{ts,rw9r^c~פ}xͣgSϿ<~@~za su_mt6W>dFhttwnt<>Lw/C|fBg˟lꯪz] 8k],tvst݊B[naz̿}%zE!J\c\ js5tZ`WtvG3y+(3 ;]o,E<7|ޘ1N`7qIdsn=M.`Ճ;bQHģ՜|z g[]~PiW'P#i#nz:xTu6Mg>hB߯H5j 8i'8c[ OGNeWsvf}rп?}?z <<$8y_Y$TB)[<{s:N"&0"0 q4qgύKu'!_oFHU!9%i)N[0W.NE`ЊΫZq؅=:J_³]HZ񳫦>X)R=Yl>$Dbnuݟ[1EE1gT.tul>lq9?LԦUXyfd(dZ\DI ;6vġ,y\]Of\w&t@.i8PU¯P42x9ގ̞/y\9bԽr\p>r|4 gXP1\y@dF袲u:r4jݛ)<+`>KKڮX9XzHA!9SA7y!!_>BG!(?3G˻Px1c' X+]drZ=BL!K%SȊPЯ-x;UXzk|{mb 'p+cL[T,8V8F)Z9#HTDFZ [Fyu?Ҡp(W>չ_z$W^VюNݛ&svQ^0ݝKO`;2 lȜު/z3kNhnڝÑF_~AHJ3ܣW\6{99œcև8d-Ύ6ȋ3 r䔕~ j%g CFYnd+B^ 4*X]HJW}%cWp*pRF21H Q9QY]yHXƄٲؾ6i|$>G*mE8:R$=@]K 'Ԅߙu}^txjP}F%Q]ʴ(!.+5 ɔ$sGjtp{.Dc"aD<6T(Q`s=^>Jd}-lÙFdF lBJ@!5vJ )QQS*!AV0J߼pVii->/0!2^Ga'^3AeL(Y sZ:mkY L@%)' X5 pD8a8gD2-!*["cJ2Ƶ]-p`V=ЗuOXwǮW队@)#Ɉ~= ޷ZE+ LrW\-!Gy3 o.&@6|R HS6c3%RJ#H[1I :!x62 $gd8XK& 17mtS#ϳ'zPRRg~Iɚc!c.IFԙ'KE0TJh"rQaq3)9ރ PLzt< B &:Y[GS!yHiFCFZb@Txk4H ѬȤ@ Y9xbގ2[>)cn8l` jA.Q_ʗ+Ӏn,, x\,OKT홖g>qV)s]yc%1]<&rO~O ݆eA+ڎ,+7V6z7X$7BXZ'DD SI ƅs*e1A%KLs$BZgl?7"E5UFWbaAP!^<_x~3Y}yMnɖwe/0ʺ"0yCT0I[ԕ>➮K;N&冏~IdѷXo>+.P]5IWXS;_`DߣSӯ_ZPHTC3Pف*طAt3 !%?<(jĎ]2mu߀T e6EmCpcӅӌ+ GrJp;=ucqDvi&(Z폞($ѷlxL^Vg |vg66ҽ.z"OI5g=KD\J0S5~7w? yji! J[ B+ukŴT~| at틍jH ^ڐcFaw(/%R)ÁJ(->~œx8u/&8{uS!$30/ Bn;id8(1/Bf$a[ib QTd sZ~V.@s>cꟸ\(|%<ANMS+iF'Q.ѯEZ rEa< x+Np7( C1 %+2'\J >Z)qN+#dh,hPApl]Px̑'9e 5RGik5cܿ[ qnSx/AA1>K[ZW`cӄh@Mg ).Qg lW9.PpTLJM;\ph!֯~g]KҰ`u.B5Al9B;ïjH-?]H1EKޛ%rjhrnKBQChmyRL"`)gy`&~@p8t|ĸƩ ӡq>[il~ (=njSq~n|:ьnƅ[>۫ +q*_ckv<*(񷢳`|Iw;R /ĎYWA%ɪAQV}]ҧJ `/\r sҗ$F/_9::į+ %2KsLTBp›l*0JE_ߓ~Yw_\"K%ڀh _*iݎUׂ:KJK}?1iƐHzS`UZe##ϤsƎʈ#c#54|bhiQ>=_#Z#L:TԱ&x"#`S}Pqf([΃HzqތfˇGa7W )$0=&7!$yC)(U<~=0o1Хww}F IWaA|Jc:D4^C&MMuڤ1 e\I y>i:թ$V4Aw=5FIc ڻJFS/V8~ٻGn$WzΪ@? { x`m{^(*yTJn{7(C.vW) ~ "_LXdTq’D2LJ֘ufu(1T>DE &S v1% qi즷\e nbm\ؒ ").6Yp XŸ:``eC("Du[MLuUȬZx:9,QGXRcN0f K@T6U\LKz巻{5 ;Vq2X\BFsb)"Hnc>֠_WGˮ%["`Ȼ/^O#V?-8!0~~yp+ m 7= 9Po8,$[&2Kh55b5!nHn؟sߡ{7X6oU}ѻ#ݭ:BDz">l_a/?yB#R\Ż],D_4y;ھ?*])_|1,_}MJ 7w_U< i u>}ljq=$:%@Dyo:``KJ2CXlad,ð>^PltbDW$+/!1'Pk tJPʠFwo!M?VeUui<2.m0Sh롶 () )#`,dDT(&dbab͔,c*aZ*`&8~dƛQuqm ;f/Ӱ]D5 VU'..zpx/w)Q`pEfjÔU 79˸L&VBBA/T]nWUg$g\6l|,0 ְЃ2Σ?%PLscm pK (D;),Yww`ݹRW{ne(q+ w?޼>`Ҫ^aM1? WYR. 
xoD%JVq;⚃/iA^P4FTru &S\P@s\';A1,KvvIXp1B%ktDs tGE{4gacJ_T||5 kK` O_~^!;2W6NcW{Ek|%OįT?Rrk[i@^ۜE+^"%bǮaDj8qd֒ȝKKL-.-jtT[#0DԺd?=>OW7K2%{ޱЗFb_ @Nū1=E̹v -Ap?9/- ]h=YK{"~7 ^I[W:8 Cn.GqsMd!i@4\6R ]T !4f\up% MšˑMtGvSZ[kq~M_k%]\Ѡ.&$$﷊mY+JA{{O$ ܂A-繂8{2T)Dl yc0O_T ,L܂5eKdh=ߝq==fOV` c.D؊^gGH=")mqITݜL!1'S{m2)&u OblZa%Z6}]cќgN?lONi[-$:ZOC(8P#{_+iie ̇EN#lufvg5`]Կ~V1֕ov߿Q)nylm_v߱3Ug!lcYieOQ}z^Qp$=)9h^cZz ,Jܺw&89I]~btRk;Њ_@nDvsn}ݦ7ys1K3,/o9*&][<@Σ[ dh¸֪>)Ɣ`DqnA4%[$Ut>*1AiFϭ,9?*p51%1 cS8aь`7P)1IE#~鐠 T)qĜi@c$32wD5UYrlA̲T0KdH±@I9,#0 2<6T(seﰨ>4|L6K-)y?}xrjĠfr&Cq>φ.)9 yNRhxO/_>~l OB˶V_9%B@DU*I_@D%?32T KyfT|Tu^ki{Yk:=JW`=׍Tյ:ՠrGi44H%pBB 37d"e! Ks!|ydkqn\̆[R? _\)! ¾^"!=[Ab"6gM*S᲻SZe֊3U_% օj cUjQcf )#Ǯk0^x]0ei _l}Wc2.HfeQ 5† 3l4. MӸd"U"9\{xSF)30T9Q=NJ^Ic'_)7^R)gJ(mh6ҩy4(K+!Z?w&NӞw*±l7UklY \KI-ikq"oBReh9wdǡP[RZQ /8VeC_tc^( r @XR _"4>8!C2` gCs (<`X '49U1]IpINTP9Ӥ ol$@P]ݖLb标'"U5l9 )ِl^r𗝯$o6WI*%]ݣO2 R쁒{IT}Dsj[F%9nAšL=EW ϋL'75_U{CF9~zOebڒS^38` Hݸ_NOC eV)8˾Pak(IOr`ci)݉؃*ol;{xc"c0+1Qw"@QKG/g,J"P' :E!/'NivNOmJb7PX ~=D6ug?i[;z1isrֽev% UU$heHvf#CZ~&%Uj&%@Z#8%C,sf:]'uW9RHzR wD5jΥ3kdf4g?R-ﯾ} Kɹ3̍ >"K !! Yfx6;ym/!LWQė L oa\p`y7g>E0``^8->Gqv!@(Ռf124dM%*`0o̩y*%% t@#2JG&YeD@N w u)CP IUPs-_Bb S T@#,=P faP<"!ug㇫A6Lgl+glG<-c Rd?c(X:IJw5jfF861gPO<\?{ƑB_ݑ0;Xو ~XܕHER%gEXⰧWU]5CuBο &66cPע9 QYB~o? (A| >I??$u]ŤuI]d8. C^*JP`cFwqvPNkco 5C+۫LA^ >Tk#|^ncVs0~ήC}XIF6DDƛ<ƓQ..aqL1v~yvL.nQff@_,}φ|A5=| zq6}2NĄBB4w;|:XـR)}Dtʱ&Ì/OeNKr Ś_0Sx)i+e ຑXed;"`pO81"=g@YO%§ΉP.[Vit-4#xO8xWVU f5qW@ $qDF iT2-њ[)1(x&8iQʒvs /D4pz%ʁ>H*[F~6 PgQ_jH ssd0XqM1$c= XY$ rBdG戜Vؑ 8NK Bsj1*%Rq@I@N8 SG`[Bw-rˠQQLRtٟvwtZMA&r5ARL"v\A&w4\Q9ӻ\ZiZ_/:Z܇c$+Df^[AT4 N'x7-*p azGezc&mwmZ,) .nt}{HϓuCmb1+vdwc8#Rΐ^Mꟿ6h鑂{3 O7Qc$&O@%SD}[`LI}0t<}]'o&o_FSZ:19s+ pcUsh.|h+w?4?gWln S]ڰ[޹JHhل>s~`N>`4vRoUX5KG2ʲ˽)^:n 7гDkvT,ade^cX9WXbj|V]z7 5 Wح[w3 uGUn+.wk<$g~H u6]F|:}vJLx0т,SLx{W*a #ict&K.U#oޭu䐨28; oc`3:2&͛~/>%"QadpZPhom@ZzòUfR 񫋋@Q^KK@jGhPZ+%’ i-c#b0Ҝ͟}Xe,B}0pSjT:( .(0BiO[yB %-F!,#LE.% ȡL˱ s?=0]]>yot"b<<%0 2, ; D "oJI)T04AjInZ_E*B%A $JBeSEZ(r# 1%6 N2Y`IA"D%McJ *\e`8GPF Ӛa8emL-0/ZG;^I1Ѓ"Z*BkZ:O gKQ1 L^mTs ;DSf)d oL,}A2?e\cS99aNWf k HeZt_Ţ-%)| vځGOio67 W*QkK_3MuGP7 s4:F[3\S _?]QC`=TT Z\˗[8+Ad+u;ϒs]Q'ˁυ*&~fHKڃ NA+~֯80dsGx(&!%RW*(A$ sR Υ]&T q93ZopbH eCd6sr) rNA .}wvEh@6&NI삘beRY+I|ΰ3[{uz<蟆 2%ALj4'UQl|ۑx\ݍ.-mK3j3K@r#\3LN*2l[V.[$E3IFIoK2ZsF$$5`@Hw-];7f,ljl+սr3m7(cӦ\ |w=ܸ=ǔ*g߮{Fp7,ȗ,@BI?Y"l^R[Ms(f2/X֧~sȑ- Ӎdxe k3l?՚P7,#BPkNjc|oWBGY&I9hT@v0k<$w.Y2)]4Ξ`?hdК-E_Ħl{4E܁\"ԛ-$ڢ6$w.˔fB6uo(8a}/jBFX*qqڂY"7oljmxխOcY MJRg 2z^Qs[A=Wnet:4A_g5:k}`@e&z!| ZS< {? Ă#  +bb,>3>vԺF GYRtoPVa{#49h~wɷ:_(gĖnEl3XŊg,=;+g)H[^ ^S4a|5EH{lo/*/1B1y??Ua1Ūxb ԥsaGVu7߷]s`[jXrdd|_~6 [hX0C!K )D,A7ZU++IY*+i.ʍU&`[)iag2tlXM_$nlI1EaS)'`4PgO{gRY^|0YBSV|][Ӣ{;T́1A/j95_7BEWű݆y=%}KҦr\jw6_ކ5[h> @u_v9"e_NQM9ŋ瞀<\պ c:KB}ccH)7Y)[4<-T3,UljPͥFJ\!E4VN uT#e^ƅƕz%26^믐RwmhcٹBk氵 zZR4&|I@P[KQJU",R L(K[jz$+ޕ-" AF<Et`L43aɥʒ]ܜv5cԻp.[\=pZpI‡{%hǏߒo>>{|m5(OCh3s{wyFᢞ >ll|W՟wntâ'Rw |~H&"a&xp&p,٠D.}֔ zIS[I9CfuB[ b^ѵ,'\J̅܇?}?fvN3p>Y07OjhL&*8jEd7 RR"rQ W_yZ],`J(Zi0Gc 1:%_t63'CY׳k )OȮ^VziD^,Zh¼XoIð<j?,i%qɫ̛\|;t)@qA:$(Pw&it[[\2WY4it<."Mh -٧i|Q{047 ^^!M zx!M!&єQAPVq~x<{t -FMe4J-AxpB-m];d! TwTe΢55~P|c$[6T#TvSZW 05/?PFFA+Q?B*A뽟w} f45\Xb -UkW0խ_DQQQh&q] )*C4793O.&! 
cQBxKSp3Ow]3svQ U (ILoÄlC6koLǔ:f!xd ܏'G T0g%R+%"hD{k38!QlY-/-0]- -C_93Of^p?Gp.|!X ke lȞ~kWw\9I6zEX1l 0j\jbB`b4rW0ύcySrm$0W &k< ZGC+RS0%0_$R4Sn*22*$<(k"m", z-4d 3W5,s0@1 dQ1>$jN$.>^RY2jy&J4ibw@N .z!Vh~O3(DbٞBV;a˔]_~D'U#)qOBPbr%GvF#vۗ\;e"͎ZD1iO#Q#XW;ghg{Ϛ=}\\"Z֊h,yE/xC_]KB귗W )G_!޹ߧGڗw9EتGWHcQs&@QڢGQWDAK+Ap= A\O#k-h:z fm\m2 ŊwN}z;NIjEIɩ/E!XA,Ez% TfG?XEK47/Ç:s&2ӻ{pG`C4)LMOiK,4O2oK~4|<{n2LƷ^y1aVx8)I`NNS*,w)f>^ST .<YTPW))IBМZRl: 94d`]Ux N!cRipкK2"JK+r*uʑma4 {>^2 ;:!E= 4$qXY@)2(3ֆu4F6~x?dI7ςyJ;f8"%Nз8pָOx2L񧡛DZK[N,]|(HĜO_~p3?֯~MR? O} ?"`k?O IWpR20I|u{?~sü{{qn)ʊU8EK.7WJpq.5˷\?֒- :{_yV*\g݂ơĪA%@Uy)nBIuQ,:9jXs4Zr\-7r/7Z"ƌKRFDBltvysqncZ+n񇟗=^Ew\L{f szvh1y_{{leg t6 m5'օT^ P ReUR-u2Ww4̥ /g oQTp5[xaoG)_2ht%S2K y*V:e{k\ ^W(Mxbr lLol$w0a`+a6RCRld(lLi0ZkCIeQD]4ʇ)qeFhS.&5.* tjŌz 6w.L) VJuv)!Ш3eZ;_:vTp|P|KJͽSdCc55s#mxLqo֗\q七(tiqkҋ}y`X<> ƻlg0#]6eLձd  PNBxpȝwGi-fԍ Q%-(E:_C7oǟG#kx<$ll5׸y4un8}ψo[<<ÓUxõZ{Dz ڢxҐNP*jyjZv|HI z50j|ܫObR z5*=N'_[""9xb۾ث2B.^E5`/%b`!}d&vVz_Оޚ'zn짽x1YT[(`*sL#~&.z{[n{X[ih>| 1iQDVq@FG/Yѷr/(h喯Qtu9pWZ ʩBp;濽{^9)y6 ZZ ˩ƶT-= +V1NCa}63{߷a߻E\73 BAC1c/M܃ƉS2pJeD %v0N 1Z2GS9dǹĪϣq{ 1XI+yo֌m6O 8ȡX#Tu]DՏ"U%.O ^?`('Iƀ;Vd'#f WLD9ƙp),,\S&o_S &RR|{0ٟqc5x9$p3o'd?l8TQ G: lW{|ٞ@2]»QvI&4f'`WN=]j-^Z JAw$R,Gl)0+'}UZ)c LD5Q*-YyP]XRjdU4թ Z' ,Ƙ4& IhҊTi6zin &RR㘟V+帋LY! LJhQ*2KpU6u֌d ZW/snW^Vهf tZ4s@#Ά$Q |pb>ÐU.f4}lѐ;^ָ}7/{@\m< 6nq|c2 p=/,B6z81B1!`ț7}/Jgw9ٚoz}؝c[2"I+`5;JYeGj'݀hA`Cvt#dw޻hc*y˃h =16ޮUo6+ZBw*П2[sJ5+iP |}QI;1wF&l`]6ŊTo{񟴱> ?'m*ݡ( 0Y㜮?h! ؠH`6hmc?'O zn?S`6?7khfgu7u(gql8@KP=B/K"<m|>K 9jʗ3L*u韝[ax{LU|À|AF΀ˁA>{ m%2>smr^g:w K9׀e9 5o5+h@et_iDnu @E(Mz'frQUIR\vU >Egu,C|с6Bl`@ap,+VBLLԕh 5ƕ:L=J&faw,yrL}T.={/p@M0 m1I'otqЄ+|SᩁkP OZ"Fiv6]RY]x,i .B%Qg +A臖"[{R4_.Tv۲[}fa}|?sOox7__3#٧oל;x,.믿\+c/1cݤ}nV'!sュ>";d1@e7$NQuL~p?z k)JSpLAf#D;hځ'i-a:BFMsjha ֆ\nL:L>J-쓈کWˉ}^mFWڈӨHpL:k}tYը2jcU*sY=-0,ڐw;iwReH1/Wr^D;=Gwlަ?~pyG;@Y欂N{M= _bɲz 3 FTa\nHG eW 3=Vƫg´q{vV;=tE.zE]]t׾>H iM7pzɹFqsXi 7Imˎ;~pAk<ޣp>/ 0eJ3Z.@DZG.e 7j78"&Ĭ,'^7S&F([;ʱD\Ȓ⒓c^\QⱥSu~=7RSr3DXJ2 O'P(J  H#1sq JfzLؒ{r.pIlzRERؾ 68QPd R+T)t ְIᄎFhDS, -|)2LYɴE B$5,%*GNHH<dGa-*g#eatɿD$zS3fZC^ZzM v`ʝ|[Z~9<,q`qxǶm,TSv [t(܆%,{?8{T`CDi$fVZ]H4@VmZ8ny)D LtB[B'BG4-w&9(>̽获ʏ 0l|׃B0ٜ,AeLctb6W<Sz|{s {X<˞Í)j* ϣm ĈQlIbW#|5*&ďW#=8&r 7Ѳq}rv}6-q8}<]Fw4͓Q?" K*zKNQBϿﻧTSFM1R֮p ~߉E fnH6Kfx".^.n|ħdzwYg*d+aЊb^~ } ܗ@˿eA_xlj9 Oe;({˺:Ao˟3]u5SDl_W s%=+Q*so]:Ɂ$ >2%NnF9hԈNotn7Z|8,kyؑko~7}.{52Fы Ρ[vw}*ݺw1o֚Dw̓ZΠZwݪ<07-[JOL& U6 :XSĪQ|Fd3'#&]vlvD)nnjVwEG򷰢w=(J+/5-~ X.xiy^qxܕ>2V_}swZ(8 7n; a>˓Y\_*l/UMK9Gh!\J8fń1䓶?]܅]e^rxtlyZR/͎o(aOX.ak1uv'\3t>Xk{iruކg%Y4;7ho[YXH a kTu6)۞MUS݌X Ϛ׫Pw$C|oZZxϛ5瓤{Tfפó NXij0nBȩ 9|JJ>U+Q*es)P`?,,~U whI՚8J;/|ǔx+5:9E 2\]3HTC\l;lUAN8O``>nqsl<UXH*UٟqGDnը!gj|y;BaCy>.n(K:6Uxk:O%SX:Z?NT(F> =al#C6E ggu.)42.chtƔA4PVIQbIm\zaIo#QyYß8e&q [qn![NԆcP/V3Bl:.hҒy)0eő%[ !)(]#gT"'Q%˹ɆF,uLF﬩탳p>~<L/f 89f]دcFUm7>kGB21t،PO f$Y1WpB)`-^e<;^|*F5Swp8ȩ;ax&8u]!y7=+^GhՁDL:+RFQ: z?CzGk [::{ړ墈 cP! f!7{# $=q*Z)ןcn͖xx 3h&refgN-3ؤy&8ό@]=87h=HN0Nۦ{a_Ϭ2t9a>LLZ̋l@N Z5奣Dpafò8qtv֣EJJЙGjB$uЉFtRpH -!XƂ2bP];^m'U`CJmϣ6sQkWX3=iJ<G)+GAL ;~H"sYC31sɬiE1h:cQYNAB;!c\@ `uR /l2IhRyKFesh'YYťYpYKw>j)oy;#g>l>PqN'kks\|Jg%+3YG,?ٻ߶d؝À? Ͼ9f?lG3>Y2t$,դJ&hAVUuUuu*04oo6߿&$wo܀EWw?DA?_>_,d]M&>|BWw&b 20\3eGt(8R0 2{-$':3܈=ktC> Jm:}%8!؁Juc-l#!@{$>9))Q`ㆂ`s+`p(   Hh$2RT5r8{<?.`&`tx 4dDsɣ(&#`]Pc+*L<9Ķ",#f$Q l ~`(0%0 \E #0PխX}x>WsXb@QJOYñ`1}`Wdߨ'x &Slx p" yR̬ϫ4/mq,xV"+Jb /#żֵ>Kχ2"D@U 4ұb`\(eb0}q,nq0%xxX_ndϯ_[\vb2A~6_d?o'Gzrd'Gzr\?!gh6ME[cD`u4'ƆB$i܃-u;(3~eȺF4.Q_-xM'ndw#{(+X..K'Opd#{8(8,l߃׽Azd}#}Ѕ*}AB0~7B4 NsryCO@Rk (}ީ=vpo^OS>xG ;~!XT{Ur :o$,J.W1#=7#G(0s(L"&8 uǑ aI͐:3G,4@XMr~8fr:r >. T=qgTpv P0Cx6Ģ9;Pֱ\Ɖ>C^߃ FPkU,lڛc j-ۿ6_Ƒ w>$X\]h8ţ[i<[TjerΘBқ/cw. 
=ʡķڿv/ rEuAi@kY{U%J[WnΑqa8iϔ.pMlt,FHQV)Ѫbi̔H~De$1MjEM{|hI20SsTZG)q pX&"@1%\HcE(2 ( yX$$e z+V{V~9 :Gqwa,3ކ,~k,N dJ|uRJXk/vU6%ݏޫ;Xv\RPUiB[ݷZgނuʄAn(XY35jYޕmVb5Q{ uk[`ܾM-O]1^x%o~>W~^f.(s{׸H]X t0)a$q,=vs-yKI/%eK)hڣ e.UD{.J`8;0ƪMAw"X=+S;~D3yBZ\ aEsy1 ʑBˆHb"J"үBH j9q8 es8CC*% /: 8BLEQe:~d_8͑XO_0LY- {iJ:?7?=tz)d_j]d폃,& ?2,G#KQ'  8FāHK( a1e`Rx< 6k!]ϻԓz糴rw}U]cͷ\{&}_;{;{7_^_A{kYܥPYFQW쎁Ed$j|_vYijN58iw.'+(=5ԥ-/W&"ab6(Ϧ LK@7^,Vf_nW4Nvg^c[Ɖ3"Q8( # A"q8&\(b"JP*f ) IL"d$cf`05>XٞJk5|8Ο~Ȃ|uhT8;nJsE^byi[sOigf<݃ 6 6LӁt`11=mp\>l^,@_Vˇ޿͆g),4Š889~r(C=)Z[)淛"t'9rS`^. 9K:Ǔɾ͐">A5rq.mI}⹇h꧟;leMxM;7K׹RF/E7*(Q5`M&HEq,C c8Hpc:ER'0  .a(&Ƈf[ )9O 4!8h r6ӆ f=AGU*|%oo6Ϲ&C)0"Vo}t+}/DA>^z޿[5=nW(NzHqv``(D$X,ONjǞR0Eg!pErA4P]M,aۦyzCaWxjO_Ԑٳ&X!”pXهr.}˷r>>Ga "%?S 24`gs0A<{n9 tܤ*oVYq.,3}ns4 pF'd /bAl+ު;B|VHs,"SUT\Wq|ZՏUٮ|rvZ[a}ZSk Ph!VcIBjL.eK!*[U\C+:QS 2Daީ=wx\8FXc_YQ^VO\ΜFj[q4 eھQD4FWF 0ʺO'^_|,E]Ӵۍv/oZkLXqadS{Q\OmT߳_lc-vAWB<>ee/_2pH~75 FO/ e r[ծp~kb#b[w{76-:Tvn$4v,Ãa·rKS/ < "^dd6cRȜº)r{ƺσEt ԰"Ή t'(:A)N.L9$ݡ\f RRݑyZ_Ѫ\RM(&2 2AdFŊPsRF4!b aagTt$RF=gz*F .3K Q,OW.5S8CM0FĨ0&T0# I ۄ(䡈@x@bl0n&ơJATLlX+2ef$Ja4Z} l3u3qqNcP\@pj\;Xz;qAFT57pq5{dU b9lάkwmf 1,`h4l@R1 R96dSavmCgbny8']no}.R?> ;m:#E_i+i;_q]j$7&?,U"d[ccyMuGRlȮ=k/w9w3:~&g{Tw4xs)vCzA`JvvzqٳѮjZqemllz׽ΖXw/6~fIܛUw1e J: 1;]ه3He/ 泈mFʤK2li}P"}M݄nK?fQ$ۚ2GʬmM򝳨OIC& }Gv)!I7J:8;gQ/1 G&GtAb:(#.g4H&ݼ*Ex DI7s(n;H Ft P5PWV|,Z#*iρz1\%pHX1tE'c,Z'@uk/VX AyZIn^->}swuTCDYxh ޗC!^7wL2jj\c -8_GZrUrE!£rSY`M.6ŅٻFn$WY,ru$Bc{%y6"mnKj~QkGSd=až,a)Us.K%z [B )˕vi{}FAfdmvfBpJUqH9pq]k(Fr P3$X,WB㿸fyVXjTKzSnj\qkfiCt5/#fNddN;K0֭v&Ԣf5E@V Vl6ʺcߦ8fT$Q s*ZJ\qۡC=74*QRMA`06UVح#RP:88"l%eEFDi4 /W0*X?b:y bg">4J~>N|xu|rˢ1p7n&Md)o2,gY{ L:a!w:|[e癢, 9Џ~T ceE8j@ r\!, j$k0fT zdfox2cy;XJ`)ٸ ցaIaoBM,}o2IC8b]f["V I#or4õUBBCDT:6 uu.k!OGgGvNzmA`ߌ˝3ybگ\B#XZ~Vzw4/pA:ꋴdr|i΋I2V<~_x<>gG|bDv⫝]L'b8ҋ;\,}`CۃO{Wm)w4NK8^7,pnJ7 |Zq<< [ʽ#e cuKX#7q!&"eJ]{9˫GSYL?>IN|~MYyqP|~yk1q gOFd6_,t::Oɸ;,3sw@xwG7Ɇ#(B xw93[4 (?& 7iE0Q%2Rx =ϥY H&x7kϕYjsy~r}BKJuThY5ǙHg49,9zJuŜ^CK$%ÛB\@R :bFg<65(BZyNpo Zůoz!0'Z [aD9Hot+oاPA >m{z6*~^Ńf896WgN ֒Q~4SVoZԾz4. Tش*h|sjt5'ގaƃ}.w.ɆMZrJe8ֳqЎ1pN?JC&ۊhLd2 5̐|49883W\)9zj08 k/)q{E)]X ,Ơ"c"WYnڳtC-UX5T5YX5}}A#8dzYXF Tl5_vGo&Pj&Pc@ .cr5!'xA/A1ˬĚ1 mwckSteh]p Lb4&͍<.@8$H#(5c]9kIe$`)2rWkEFweVp@Z%"!…OegaqQ-J{\ƻ1ԁW-޿Dgh2@Il'Lu28}B])DZm>?^LcfMfS xն)_/ަ]Q#ˬm\< VsOn{kK_{VϘw*i^=U5D@wvip!/8wBWcmkB^ I69n8U89]ʈgQBޢzoPD,hवbԀIk R⠉PkQhoT\LtЁ4*nrw !߾lJ-Ėt!vLX+/[(]RCI:I1 cn36Yc7يY6 wHN4*g>9ZEP}*DǯڣOOZ k5G IHy^g|Rm>w2У|sǫLzdޣ/I3:JjQW_b R`e!n??꧔c#+O6QLҋPCG8 5UAΒ^avG8({JQ\Pw/-.^5دg=*F2)GF'󓀻6mSP 2৐ M[Vi- xպ9e_Ζpڜ_\Q%[4OHKN٫!ϋa`}~0=|.Gpլ)W;NN.%;O(91yjDir 8LAK\{4'/⒨AYh 3 aΆokrc!8C0!^ScAq*"m7d0,޸+hK !dt&wE 0Bk7I? I$;.&.c{\9O}2սt-ѨRtk[ {v0?s6 =$J;:=7bw:]{#8\ |Hb+ P1=unvnЖ?YFʁRh{"VM@Bn=ȵThaSQvy ]ȝ+g3QPp ^v Z"r>W8B:'2\n1h(bH-V*dT r Ўh+ )O2QR]G6MZrI7lD/R{-@%avوh͈J ZRRn2pizi]2 S{`Y)8XʨL:W4pWi-~1:)FcҐf)j!y+Y Gf* Jhfv7 :&hң{0I5,MZv;]j4骻iiVil^h6,IGᔑ?#N(t}w3]7H`|wвtz`lx tM3p ;?%i; iF!A.`BjB\gxfs NS%eZouZ2,Рi(i!77q!.ObaG7Kգiu.ϷXH5gϋ|ⓟ&_>*@e~(拥~xN'wWg)Gwv~z%T?>Q E=(E8&^=rk$FŁ7}Fq= #BJ_yFhnL$iIRPAN"X9! &xIqV"o-Ơ#FOSli$}Du ;XGӉ/U]T2ߟ}M}7DwS};~1@`s24V aր'RX\< !O1S8Oz6/K7\FMt9Tuq6-gqi1(9Z|k~/ v >$)Q<CJ^RБc8؜UuuUw=6߀X$)cۦ+(waR|%<KQweHMH/,,\0k !%0yNaql%S攲F2&gcZ@;P/`V?1@a6'ɎI蹢:A5*1X o2MQgW`<~8VOsdƃblQ\/]L˳ͯ.YmS8ac"+_idȕ#[EltN28)8+ng|:&-`-:fq6GGGPDɐ(,RqHU)4iD2*TЍQN))R}ҍĈlUnFaݚg?ݘy)Lg!|޸=H)G؂=uOC~kcѦ!HH!u_T+$˷%2 ibtv? 
98m!a!HNe͑VyR"s;T@#FI][2)=-S?JۺFPiAS;R1Cˋէ@*P+P.)CɹSS*A *`Ѓc︕Zd$s[!uDʡ#R;la+A@>eeH ÜK+0U8ۀ7!6 ќ[ 2*bHpoqؘI`Aٶ$QK%$,%JTܭp|ZJ`mưyϓ{<6Ba k % vF!8piaYz ZB tE9.U/6Z b2X$cF,-Q,aSA)83q`a%)1C| p{ÇƊh\ɬQn[e.fLM$X-';b6`phQ]}zX98?qVT}S=ͧ_b>#_s03u.` Q–g^[cyU:$ XcquQ۠xHBWؼo ]5dd2;}~7Fd^|O\ySœv4c)qzpAXǐǝcs7 ,Ry 贽.DtFL<0 @%Ѵ{PN?zYbQ\;)vR,IYN*[ѶYs4\}@)]ؔd*ƻfZ^ YKGO M3-0{#~m:hnHe7KA#iÿV"GMp!^"ikHo'lwrj͓ީn?L: W'oo'⫏~u]M+H}%i~uƋ&z`忏/fa8UjBudѝ3p?OF9 BOknw==C;g:^ITXh>`]sKc\cV. xUjFim~9{EjϾUj=61d8qQ+qaC˂0h`1ɱ G$AS"9R`…1  tG/Kֆ:PaNSjpsfN\ BnHκ`s' u>[lMJT1bQ\~U{%YvzR7JLZnrOk614innf[;.ES.E2jL;ļlpqj-ʉSE4\AMY덤vgB;ш~.pzl:(n) (rn~ȟl$'/]50aH@1)Qbu5Ch?9!©5RJGjB'  9,?2-qN 2d1+s%ZjECNB95V[jN~ỗȻ=~<+4e070'C8+ ?9t6_ ˇx< Bwf6UӹLV}\H>"DqB`xoGfnoާ?tGJF);uV RLrKr&20 (ᵲA&NE6i9ʀ4">-$rS砮a23M`.,%Ƃ*Z$-2D!8)r{Fp9:HpP aʓ ,'1*b\"9 G6<4H#$WʚЛQgfw7f-tNJqSDl%%#% S04/ZlB ,r`yGA(x pMbΚ9ULƓ4~{z  t?e-%n>h|1TpY_)0F}K -}=!4yηw}p3<2FwFr; FZFBUˑѦ\~yl Z XƕG-Ì͔s])[mF0zp!ښcW\8ِÙD\ {CS~Qy_&. mzAajfmd$7ի:4r7V$ [$Ue'[nL$(қ Htr"- =mҼ2C,hJJ9tpߛShQkTdLnJ{N 'l*Z2ilMg&C:CZQԶاW d=n} 2ɄB#4[3*+Š׳INQTxnhƩfd<(0gsʍx S[vzL5~)쳘 }F>I#_RG"/x&bG ɗtċ5s ; {( F$vm#Z6Zim+p ;?}.&mᵛAaec$p`]9̍TRt]?zQ76"6/hϙ3]EƍqFdN S<_V,Rب,շV[-Vxgrc/٣.:Cp)GpVi:2FADx:П6t{lj.ci"oRIMק{P0EwM蜨e[yNLps|."}g{X˳7 ?Y0@vI9h DfS)h<SNK2AAXeθ<+l3q~!O|wmEð5*ҒXfgbͧw\ky,ᄨ-+ /W!](Il;2_{VPwF]ޫ2\SQ<=20?8Vr I88٭Bv3h;=`;+qB-hoj:~хz59!);Xx3Bh>Rw~ءrbWpө}ڧ͉3vy3D@Q<_{˕G43ޢS\$ೝSM~zɠ\\I#G<<~02] ~"yH.籋y&.W9 [F-\S.*NLXWxKT!m!JF ~e|(>&>(<ȣPEO%*AӉvz7pq1C ?圃P7|A>LК#5:j%)>XXbuKΫ%VUɵ9<sC'`Er gvth$TNɒtFt%7Xל Jo%t<jOI}o0ONca5>Xi@Z?_ʽ) ϼ2sм3"Q蜳N2DS]-tSvZ3hÂ@S, E4Y9&ƀo-N 4ƩKCͪuY gۯǓ齐BNSse'/Gss/w> \E[ԡ<< y;C%r8?AQ)j-psmX} R0.߿ȹN׽/ 㦋OTjވ^5W-A39B;)l_xoZ ߝՂh@ofrL|a@K RkzX ;R:~ۼw1鏟ӣI[3ǫb7= W[˝v7mrqWI7%44Ḫ)58zK-x骻"' vMO˃9vZH(qFfoDBy˶Z#%[H 9N)+ǒ)fS%,KŻٟ,Q.~}܇ӓdA$6#Rs*  0=p6;" r9}w ibųрyJlYN/&T}ni%$6)QèLUQ2stTS|zɶ3*)4jNA ܃&xQhiIt^IQ{/P͘5 @s2Zϛ FABhq!$SR x!5 kC \'m!)Ez2P[ARrkj>GĢbw૝G_T@+ kc|)zF6[p6./O`Qp bs2lRUPf N:5ZRk5ӄ 2u*ɏT[Wyu$t|<}D%n=wg8TѸ4dhbPdOg+$Pn[fD ሡWבӺ]R,ˆX;`Mpr5n(U P_CJ3Qa*GkU5T Cvh Y62K=e,x!TtuX84ޅņآBAD\Ҁi!wRHŜ6Z\-)W.FS ¨IHe)k;8e0b0 ǧIdMOwn1ֳw}_ ͑Rɽf \ZKQX5 :4 `q^:r!26pjmNxKFo帻+CIF #w( 4B0K+aF8pm#!l k ÚNo~b"n j{XQIOxQ=*e9 Оx.Pvͅ(;;SmJS ,{Ja -  F  #Tal \&) PU( 0K<29!d@>:VP+%Y1J![ڬ-K;48A GިpƚVdr&/?dDDí:Q4t c:ywWJA'/$ō:tvՠGy6ݡF0u.s:nƍl|)A?cG(Z*XG*Ԑ:5&ZU" Spo]G-A_k4Bc-bTkJ DLԽZ3 S jVАT5^Qi MF-:]RFYG$1])1BxW K 2+ܕ-V0IuN"Ttb"ۓf)TvP fl@jI !f<ĝ`81@U^L$J@WHR'׳\xu+w֠V|lŚ;+ͷkAP92ͣ*P @P Dv#bj=[-E s롥I/>.Om@x +(]wV" ϖ P)I{A#%S"]$)%I}VW7/@_p k@_ŪC|IlLBxX5C`ǧ7G)4g} ?岸3럡7GGig=;få''&WҮӆ꧷d̺~Oq3n|98^z>]Ὕ~olB~zNN=1TiYLqlcEϛ>tD2O ~{|XY6PO7c'(gd8g%dϛ1F;ʣ>(?Nj?61 F9f:0)\N!OW);1;shiI0*w8l9vmٟIOX}VC[7.d1N^nBTzP5 EtQG Yvݚ%j6$䍋h#b{wS@η/Q-Y%L x%HS6)AL[~9Mʀ+$7k3ʞ­yQf*~\8;sB6@GYQkp=V޽j;\^h7Yʥ`PnV o3ߌV9cߪQ zR $1FB E;/i6Oh#{M>H6]`-b\׵kPmnFE/{{{TCjfruU{TfsR.Jl+n@%HMgg{DͧFR”oV Ŗ'@%ZS[J;{;ДBnVݏe+xbcGx pcuq]#ċg;cEwFz\ۄ/o mEA)ZoSߴͣэ|oQKΙRNEQJcUڙ#mkiӱ̏|ڙIC^fL.o":M<s~?(/S$ S/US8̂'@|N˳Kg^/..0sI 9MdO(2CgΈ~-j^573<1w ٤l]eaXSOWڗgO9Ck~L#Ҁ XTD4K$A)_$ ؟fJ%v95;ϋ}rr tdV ԫ !Dxiߘ\ ULOnKClrDN35p"n?= 7OߨTG9s^N zT a,'Yvb=Rf޼s7|{]O #iúԱrX<1k"Րl>.\ kX? g&B9$9LBpJ,rc, PlAL\eiwTms sjS-;ucw"ۃnm:md;kBUx al:^  /kkٱf :G>Ъ<7'M|#u"IC _pg:=.i,̱X\z/B%ٽS,˥X/zd{X] ٮ! [ʎ[eM *E1RQF(fK#sOPºccޭn~LǏaG_zun%ntqh-QR'|Bh)r+J;\fFX ݍ3)OuȗzA4}JZ NlW*h7nf޸5at޾>DWw.]%<.~U]1FI%מ>]OO+9F>mlaCG}ϱs`bmm&!s0v 1a܌\];#]:7:#; ~g[!_kvֹ;3AmG0J ibS!i! 
7 HԈ&m!,.-eo^| ;ƈ4gT[-<(w3 3|z#[Sªu0@syU1&q:3=VWdQ85ʬѬ:,$=j\,l4O;;sx79YAA z{q6TZ]CFӭ]3ÙOI%Ricq.9Ɯ&X"nB+TRJ1Nz؉1nM3?-6݊跋x9p=ejybު0q ^Q>cK$Wƨ 81M \-`eT I]LP SB)+sQ)pFd"2&aSֹP74ˑhXʍYF"i`=Ly9O++Pkf8E.p!Wc,E<3>bi (ͬ3KrͰ2&nR6~etZFvZ2x̏n_v=]4Va}ULqTJoo6Wu?fC;[JǔĀ pq׆֑)cGhJAHKMPHKBL Ai[)--)y{UrzhILN [oה-Nmu;h-QT8)ly(Rsi60ȉ-sgkiQiZĖw̽*<fy$Ň:$⩇2G_1 y{i"z̿knn߭h+G,u>5ty_)g MY_WY5X$+`a>r`88H{>R0ޤ޸MpB9SQ8wuj⫒+/J}:]&[%Ln\lR>FD+"wٝ]eLx7 !~=IܕD{ ?eۖ$RX]FQPb":Ϩ݆3lOi)Ѷk nmH7.Q2E|g\}͕x'SSJg=mOha%0}!u !JOL{eCC9-:ڪ5?fZ=pH|ްNѡbb؈{0>@tx(pBq^]JY)SHb$k"e%Hp-ʾC>%lխϫrѳioN@o*T6@{+Wnfk?r6^,gvM^2惴F?2e;{^-_gY. `uP ^J;q94#%+x)i}{/bao/!KweFpfØG>dQ$Q%'DK (PbOQ,&hU4B'E`eM~cӵSp?e.nS?O)z3+l<؁QA,NoXMgKwh7ŐMAj?- #&=ͨ_;_}c~wrh|# 721j_\MT89V2UQR)>Eb}W w.Ys a]0r[mT: =偺:m\PzgYx+8v`%96 ˽_@V;c1\v}PEgm 9Mqh Vy {Nȱl*^KXT߼RkikCf ॓~;YG@)S- m&$N-s`i| \J3hh BUMe"V)[Zۘ\x|-BureZH+DG+=(gc#!#A{JIYP0k]tq/]j x]֚+.k{g~2V-3 Ȯ`!FFdZ¹EdWqYV(ilbwIjw B={ֲ{uw?gJyX\cD!lRH2 XSb2nljX ɕ6yȹΙά0r?gX '^=J6lT*mN`}<䧕 `# frp}K0%{'I YȵqgJHLS  qF-TZsdHhBpB8Z&g6H8:98-ƒe&D pF(pRjJ BZk ˘RPLߌFrC~Mb $k33)pR'"[ǜv][q$*Di}r_N eRKG"FӸ5ZUa`}U{rnžbJ"-0ؘrJ[hF2P<,tjM+.)!7P yE xpDe1.82p5čgC_J|;[ P fD_q9Wkݦ|,csξ= JHje"EXį])go $l`i8!)E/YQAD)c~|\$ˋȪz_|};4)H{R7>XbYI~\yO\yOUSC:%yƄn(cR#ðb*M Cba&&$'T n e@[K35 hi]zB,ͅJV1Qq?M^Wu1꯾Xa:$A{N1I2&_cO+*5FrC'P?{W7/{A6ߋ 0沇;[_bzݞ"m[ڔ(ձZ*=U,ɪLh#`A\9+)ai+|P+|}L犥Tsvas\KjxB $[!uqST͌u;ݘ#냕 O+` 8H \( qg EHJHm{S>%*劰B[ٸr,V`e҃LGC Z1 %I8@׌h9㖀-CC˗ @76}ڎ(t>ƽ]QR ҉j(cyRAunrSJ 7Ꭶ2gí`Pl4 &-3Ӣ[DQ Ƕ K9`p3FNIϫ>stI^^sN0"g>bRBdbs 3|R1zȩAGaODsRa(i1pJ hH!z'xL4q ha%k*.1^3Y7x?#`.\_y}J|]m?~ٍs{o4 D#e&%HCn稟xzgUz"wԥZO[ sբVlURRY 6i뺠-wϚJEM n :poS,9ՉpvQ3em^7$ 4ǁ$=\pd To 0g';G3uֿ@N d@VA(b2:bJLUz"$z5ƨTtdmJ5=Xj2î4bR rX KчX(Y0{jbrI9rlS)k*93UxʹG#(v} P:_; f\%ϋ޷Y؈1R;5QFFu"qNVT̼C< c[tvo LBM:iksTz1S0ȹV̛gB# jW7\\J#r췩`lzӃFYȵ)ZZHSsc\ƒ68QvΣZs% TgPCp~QnAy*/WS篊RLZ7|"7oW!LoS *3x DRطMU!b _L-xQH6CP2\(d!3Y\r>$2̺|ϚPV<=LNY>rɘj.fl9 w|HIWC3ASY-ήfV@ٍ<rv_浺 tsJm쓥%"&F76:k<)8`xd(+ot*qbH9!3Z+,~55FRh (5jk.WWFz쩞ǧh.vViҬ-{*W9>􇻧"b% ~9M|6\Z(I֔)LI/=d]%3TaG7']/81.4ZJ :# t`rCN9w"wyQy>j(+4W"Q0$/sMc/9 ݜ#r ٨77+,t<$#Xȓs4`rJI.9atA Sfǔ1e6|lf6494ӂ9q!Uoq傷 C-yp4j$/}P'yO;rP$j)\Y߯/ YJnk)ޔU{ Sgyn PA95`\ jȴf|\=vP\.PZ\83Bf!`jdg8Q;TsoW G_|T) ZWEe=U&)4n_>meK^moѱ|_/=Ћ^V7w/.(9gW^ȋċ"]Υ߭BKD/?^_~SYm;|DW{ݿ+]5ERZHK 0:3Q”RBJT0!際t {FҸC~Z3fN[K9GBJ E)ZitJ("(޿g fINfs^1㻘"}MW:DZ.:QZNk\iCP$AUs 1n 84C9(UTӌJRK1HŵiyLTi_VWʭn}HsHjåcpRxVcnHhAc#ydTIfFF=['Xξ) uG]gׄ^0:"u_D/2ͷ8U}@!p T0RǨpUJYjZ AQ).g[(BM긕SGV48AyW9[9:PS8  ѳ8B J;Bzh>HmE:\ŭjQSpY+f1Z1(%hLٓQD q3bnt/Kt vvj) (պ3I˨*u/<'82Ō~AC3ֳ~H9GTb5g50J!e޶&m(5dSm1֖聪Ns{`Ts;Zؤ%Z5jӖOoyıR;$rl l2AG2FE gszojj4QfL(&k}z~nfE/:g1h߆C+8G ץM;ee=ƕ=3)TצD*H~2n0qHaGWmuO~ӡWiX>֩FmX e`;DžPrAxxنL12Ьy)?heA3m* OFSb HٶgwufgT-74|#0&8sM&KoVW6} ˔T;"bb83>f\=nc4*9HF583K 4e-Y5 s:f^R0>d:j0NHR!qzuQR3 t4$֔NKJ,V>[p5=;oGǭ+d'to_/kKdB~b!3.|ĢRa:.Nm.5R#EÞL Km6uƼ?*? :8t:]Z$ѕԤ(K_|F E|"zA3 q )!rG~F?'Ml8ixBH51H/m$StqvU5_? 
5FGQR TRLI9 =eNUsZ0W ZR|^#Ͻ$JHa*+( -&{qZًy~8nH9J^,Ήԫ*M1wjKdFKZȂ}8dHKޔyw%bW/6>n#<'ߒnX]&koG;tEpbfl8V' eNbGwVNlfXM8mlDMzLOOߪBtՋg}K%dZ6Zͭqy:v7OH3h]=X#{1D ݎPL'h4Nc^MgwP~ۊآ)L1+_Tp3- Muz bbu.~m26Kd&1#Dt.V˜hBr(As̻|CҊ eB/[MllC,zY% g,r- @b859{1Ϻ8@b#XՔ Ef0ͧȩzFT8aNC[@Yߜ}\P^xآz֕.~W)=>b좗7DbHӓޢ 3:iϬʔQLd5t`^'y>YJXWsS 2W$cj%ZuB9uc}iUK0~+B N5`- \R+׆ʃ Z٘NZB-NW0攰fҽ+D& ImIn%+C+->IzzHۨ͆ѫM - >HM3cWp{ûr~8/?&sǩoߘ}i}©OT^E36zn`t~j1;T?o(Gfv6G]_ߦZn^U_=x)%)lՏd!D l gf]+3%$& %H[#aW<#\A4zfӷ Lrt-d#(Y)`cԜԣ]6OƜ)%n+FGpKXwͪ@l`e(=6 \XQ@t%Ȫ+4iϩ+5Lsњ3sApCW_kqn:qRͧ]7I3K?r:LZroNMjF=uѳd?ۦ "va洺 IW-Jϝ3"ȥ#2,Ps rЊTI'@ 6(-zCt(Z{bu!ZB@`jp{B(+jgp%:m\`* &c, 2$?{Wȑ LQy 3;nm(,7"XU|UI>{&}өyxJ#~xtc?>\8z.L_R G#x>6gH{{7LSq֘KqU%|[mPYquP'0܀ N_KY!~4y0_3; !QQ).\B흄u/\IO^[G-ʃyAr+Qq ^&$!(0G}=V;`\,Q_, ;IFX>Ц.4وB뎳?.x! Լ >5aҥmOa&yI (,K":|Sɸp}V,9%"R5:#U}yzs0yHMX]_c"~wJju}pz*jD^js;Tybrd1F 2C?Z$76Ũ\P]b"".!8 D12ψ`Y QjzƑg~&iQ8su4EdC@qBϣ-~oA ^YrGHG.Z}Ee5ۋ aJB ʣBAa,Zh2\z[(tW"srg~mVS*%pw҆Eb镰W-3 ӻ.4PK%s }X8@hbsKP٫qsd&lܔ̈Ւ =B15PV8oJ[ɔy ?Ȕr*5QYɫ7+!PćURç푠GOz;X;3\kaHÓ " :B3y(@u0RyHjJ />ѫ3tגن"*>n&c0QiWu=-L,?sp֕'Űbƺikwk4h-3qwP9 G;GDuMLhELh#>Vx@蛭u Hs h M Psx_ؑzXdY57LSdcVXq#h-wZ畠 *ᩕVИ.u:ιj r9/qM=mG JN^[Z݇a іGчUREsBh.wf10N,E.^Ղ)n3>EaްS< Fy1B::35bF1є~U/BWmǂʺP#JC0Tag[rH.T>a9%#ܳC&3[vu}x$uFfJO)Qs`McDpI0-qT-T \2=A並,ŭ g!nX[0~rE \{>L=>=6 do*|eM((O/. AGO'$ 8֧ݷX=[Ƌ:`ňisqn|!=q^]V킕E&PKh#]uIhTsn;vj|87֩:^j{u~'͢"͢m ̓𺧙Rj3.= ˿뚹UH =kDtgyEʮZ i:i@,g$u#J|PJ3\Ș(3`e8F0Saf% W&q}Y9r7o[Lɖ QZvPԮsqcS_.kd)t˂}y0t.vUmvp:/ u /Pq&%iapn;׿D!TeEe?eXwZDzZO Ơ7T)Lv@Љ,]薪Pa,Ͻ**.gĄw+Ɲs_UmRYIe&u[VYι j:"!Luy)(FU#EDh >rߡ?>D< q_Ӊf%-b+Y>|c"{,ٹ &it2q,!56 j-2_e P'u &;|ք5wð[ 2μTmϣQhtJ=ihEBOmmSN4IC &NyN[3-jTVrNcm툳xbK?kSmpS@:N\)|bbj'xur9^O-fmtWzI,Pbf2XҖ:ձ2uVVOjsm1<%m:OsF*H9鴬3 "[Ft 17/is8I(vmtJ>NQ,qzZ~ɴ[P5-npxיJ>% dRtXG353k1Z{t+*b@-əNpxt^IrYEBQh Aw76.`(a^d"HF4ں,`(CVT^b`i`żS˻k*I}H 28H|  S%"a^xnt0|UnjcIepFD2VRI1ZR#'B N3uZJJŜQfKkA>UNn9\s /­õ (wh,AڒUH 6Xt䃮CI,D / `k60L SGBkRfohCJekuC"= w@;X;3\k16@N:V;P"VQ]<q[^O=Ք~  _&49 YrQ#ŪˡJ,>!rtpAywpVR `4Xʪm oI)uGX'͞HU,ƚ'f̒4k1),ELy]x!?@9hЬ>x,NbW` x>}YgoxV(~rvHw9N7¼c fk_rE8 >Z(ic8jKͨW]It[̗pB,mzzp3xՎDJ{5m0W=GeݍWɨ֢iV2窿,?B 7]rB﷪kZh0El:e\4c28-:p'r0E^M yf.eA[ &ֳwayˬ,eԃy\o_5 /LS*NIk: X|᪖.lapK.ܟ6 vo2?/x5ˇ Ã@ l A>\Q.ic+Kkr+1s}}{3P1lʼ%f^1F%\2-KUyX:$#س”|Qb#3`ƳYʉH4VamC[p?`$1JShQbE<*mDHb4(<cBh`p"6uD7TIt)J~bt&9 Хs'&̷ |,_w`Z 9XJKOG5%WuZ+XLܨ|i֝( :J4k#X[R ׷1N1-!>36Ƶ'k~+fw'(LPzy Z6Sū,Oi*^VmK(U-3?R3uǭ.ď(Dn&c]Tq ARG:!Dݽnv_r\j_]~r7TX^ß߸2vo]OE[0FS7? =2_th^a'vUn60u037n RhsOnƞ{ yx;a>_z$E m݋a~_ "Қ\F'7rzCw> 1`ȕn?3jl? E}.K6D*Hք{ү\b4J_A(\݂흩UN]ٰu'=G{݉ ]PH}ԗT u(ths=0i7Zo5~wӸ"8qMb-zKAn|px8K؝>k另3?-SL"\Wn_Bq$C05}{׈H2we Cތ&#hfZff2Qtj?$8BR$eXJPA@0\1-qzʇ'o( lfi*c)̱"ci 0ID-@R+81|j9`R5<n~]LWM+с45%lbΟ">Vs5swx7gm.b[@T{\P7w4+r(1~JW˛Q2B-M]t|dO`)Po\Q|˖l ^1! ,6w : @q1X؂׆U4l-0dp:g2pe2qxiBr%V/`qo0Be 33p>2?'qI+w?Ϳ`CJB0vJZ[ B?^_9h@e{9~j/K20Ogti"q5MϭFs5$Cؙ 3'i$*q"k@y^}%7! 1QNJ$ۻw-X,3~Z>['0v4u<)𛃌D>wV>k9aG |؉*7tYe )D*C T!E=t6] G:VO]]潃>tPOSUs ci%W频\q[^GU} #F}pO# T:lP mS%_齍7[TbY@O+DŽg|1۟Yxx/mMXMgK;XȚ6D>6S[XNFb9-!ѝ0^2O;zϯd"֜\Y#pPF"ӔeBv䦍m2xgy# DPQ j;^ lA C]p;\Clv<yCւRsKXjL/rm9!g, u 3K4 e<Ye,\KrEv؝Vv7-%faD/y &B5 -ql-⎋R5<.+g1WܾHK+(WA\0n^KR=U%mlMy{zBe%ȩԭA;s~w[:u%HWȕ)?cҷ֎9m%wq/Oa0__`NJ`᷉*!-!NGv}vi\* X[Oۅ˜*kkBr )'MI4鄶QŻtJIg-wkBroS ' Q2ҐEȎ#LXD)ՑLSCO3g6m\\b|rNj,@ ASm^~|hh NCnw%iC^u`܍Ab),kT-X6ׁ'LvĶw 7k/i+\p^Qܬ;o KY Ǘ"bZ궀9)cXO<17,\l`N2mU,w'oW81nqjsH *IEvJ,a{ki8ypZ*gz2Oo&|r?&'.ǫdvHD=y7w^k:W{ Ld{6ᒧqB@cARX Om H1kR?XB[j%NITLK qSC0Xщ&XKCJH)EEL jD,B'(0%y.m֥%dzu;E.%lQZS!3>ސLN\wϯr,W ADx{5Jrtv >B7ٿܛeIZ_; ^UH? 
[binary payload of var/home/core/zuul-output/logs/kubelet.log.gz omitted: gzip-compressed data, not recoverable as text; the plain-text copy of the same log follows]
var/home/core/zuul-output/logs/kubelet.log
Jan 28 12:46:11 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 28 12:46:11 crc restorecon[4695]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc
restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 28 12:46:11 crc restorecon[4695]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 
crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 
crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 12:46:11 crc restorecon[4695]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:11 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 
12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 
12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:46:12 crc restorecon[4695]: 
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:46:12 crc restorecon[4695]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 28 12:46:14 crc kubenswrapper[4848]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:46:14 crc kubenswrapper[4848]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 28 12:46:14 crc kubenswrapper[4848]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:46:14 crc kubenswrapper[4848]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:46:14 crc kubenswrapper[4848]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 28 12:46:14 crc kubenswrapper[4848]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.287092 4848 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302906 4848 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302940 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302946 4848 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302951 4848 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302956 4848 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302961 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302966 4848 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302970 4848 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302974 4848 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302977 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302981 4848 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302986 4848 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302991 4848 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.302996 4848 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303001 4848 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303006 4848 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303010 4848 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303015 4848 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303018 4848 feature_gate.go:330] unrecognized feature gate: Example
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303022 4848 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303033 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303037 4848 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303041 4848 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303045 4848 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303050 4848 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303055 4848 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303060 4848 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303064 4848 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303069 4848 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303074 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303078 4848 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303083 4848 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303088 4848 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303091 4848 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303095 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303100 4848 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303105 4848 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303109 4848 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303113 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303118 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303122 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303126 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303131 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303134 4848 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303139 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303143 4848 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303147 4848 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303151 4848 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303156 4848 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303161 4848 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303167 4848 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303171 4848 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303176 4848 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303179 4848 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303183 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303186 4848 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303190 4848 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303193 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303197 4848 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303201 4848 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303204 4848 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303208 4848 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303211 4848 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303216 4848 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303219 4848 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303223 4848 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303226 4848 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303231 4848 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303234 4848 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303238 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.303241 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303364 4848 flags.go:64] FLAG: --address="0.0.0.0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303374 4848 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303381 4848 flags.go:64] FLAG: --anonymous-auth="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303387 4848 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303393 4848 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303397 4848 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303403 4848 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303409 4848 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303414 4848 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303421 4848 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303428 4848 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303433 4848 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303437 4848 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303442 4848 flags.go:64] FLAG: --cgroup-root=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303447 4848 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303451 4848 flags.go:64] FLAG: --client-ca-file=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303455 4848 flags.go:64] FLAG: --cloud-config=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303459 4848 flags.go:64] FLAG: --cloud-provider=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303463 4848 flags.go:64] FLAG: --cluster-dns="[]"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303471 4848 flags.go:64] FLAG: --cluster-domain=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303475 4848 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303479 4848 flags.go:64] FLAG: --config-dir=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303483 4848 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303487 4848 flags.go:64] FLAG: --container-log-max-files="5"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303493 4848 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303497 4848 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303501 4848 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303506 4848 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303510 4848 flags.go:64] FLAG: --contention-profiling="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303514 4848 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303518 4848 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303523 4848 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303527 4848 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303532 4848 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303536 4848 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303541 4848 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303545 4848 flags.go:64] FLAG: --enable-load-reader="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303549 4848 flags.go:64] FLAG: --enable-server="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303553 4848 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303559 4848 flags.go:64] FLAG: --event-burst="100"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303563 4848 flags.go:64] FLAG: --event-qps="50"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303568 4848 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303574 4848 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303579 4848 flags.go:64] FLAG: --eviction-hard=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303584 4848 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303589 4848 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303593 4848 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303597 4848 flags.go:64] FLAG: --eviction-soft=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303602 4848 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303606 4848 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303610 4848 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303614 4848 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303618 4848 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303622 4848 flags.go:64] FLAG: --fail-swap-on="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303627 4848 flags.go:64] FLAG: --feature-gates=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303632 4848 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303636 4848 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303641 4848 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303646 4848 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303651 4848 flags.go:64] FLAG: --healthz-port="10248"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303655 4848 flags.go:64] FLAG: --help="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303660 4848 flags.go:64] FLAG: --hostname-override=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303663 4848 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303668 4848 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303672 4848 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303677 4848 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303682 4848 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303687 4848 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303691 4848 flags.go:64] FLAG: --image-service-endpoint=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303695 4848 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303699 4848 flags.go:64] FLAG: --kube-api-burst="100"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303703 4848 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303707 4848 flags.go:64] FLAG: --kube-api-qps="50"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303712 4848 flags.go:64] FLAG: --kube-reserved=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303716 4848 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303720 4848 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303725 4848 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303729 4848 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303733 4848 flags.go:64] FLAG: --lock-file=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303738 4848 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303742 4848 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303747 4848 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303753 4848 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303757 4848 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303761 4848 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303765 4848 flags.go:64] FLAG: --logging-format="text"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303770 4848 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303774 4848 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303778 4848 flags.go:64] FLAG: --manifest-url=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303782 4848 flags.go:64] FLAG: --manifest-url-header=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303788 4848 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303792 4848 flags.go:64] FLAG: --max-open-files="1000000"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303797 4848 flags.go:64] FLAG: --max-pods="110"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303801 4848 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303805 4848 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303809 4848 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303813 4848 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303818 4848 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303823 4848 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303827 4848 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303838 4848 flags.go:64] FLAG: --node-status-max-images="50"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303842 4848 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303846 4848 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303851 4848 flags.go:64] FLAG: --pod-cidr=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303855 4848 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303863 4848 flags.go:64] FLAG: --pod-manifest-path=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303867 4848 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303872 4848 flags.go:64] FLAG: --pods-per-core="0"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303876 4848 flags.go:64] FLAG: --port="10250"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303880 4848 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303884 4848 flags.go:64] FLAG: --provider-id=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303888 4848 flags.go:64] FLAG: --qos-reserved=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303892 4848 flags.go:64] FLAG: --read-only-port="10255"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303896 4848 flags.go:64] FLAG: --register-node="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303900 4848 flags.go:64] FLAG: --register-schedulable="true"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303905 4848 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303912 4848 flags.go:64] FLAG: --registry-burst="10"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303916 4848 flags.go:64] FLAG: --registry-qps="5"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303920 4848 flags.go:64] FLAG: --reserved-cpus=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303924 4848 flags.go:64] FLAG: --reserved-memory=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303929 4848 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303934 4848 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303938 4848 flags.go:64] FLAG: --rotate-certificates="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303952 4848 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303957 4848 flags.go:64] FLAG: --runonce="false"
12:46:14.303962 4848 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303967 4848 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303972 4848 flags.go:64] FLAG: --seccomp-default="false" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303976 4848 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303984 4848 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303988 4848 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303993 4848 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.303997 4848 flags.go:64] FLAG: --storage-driver-password="root" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304002 4848 flags.go:64] FLAG: --storage-driver-secure="false" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304006 4848 flags.go:64] FLAG: --storage-driver-table="stats" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304011 4848 flags.go:64] FLAG: --storage-driver-user="root" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304015 4848 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304019 4848 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304024 4848 flags.go:64] FLAG: --system-cgroups="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304028 4848 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304035 4848 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304039 4848 flags.go:64] FLAG: --tls-cert-file="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304043 4848 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304057 4848 flags.go:64] FLAG: --tls-min-version="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304061 4848 flags.go:64] FLAG: --tls-private-key-file="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304066 4848 flags.go:64] FLAG: --topology-manager-policy="none" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304070 4848 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304074 4848 flags.go:64] FLAG: --topology-manager-scope="container" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304078 4848 flags.go:64] FLAG: --v="2" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304088 4848 flags.go:64] FLAG: --version="false" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304094 4848 flags.go:64] FLAG: --vmodule="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304100 4848 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.304105 4848 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304214 4848 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304222 4848 feature_gate.go:330] unrecognized feature gate: 
MultiArchInstallAWS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304229 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304235 4848 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304240 4848 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304248 4848 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304253 4848 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304257 4848 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304261 4848 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304264 4848 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304288 4848 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304294 4848 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304300 4848 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304305 4848 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304310 4848 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304315 4848 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304319 4848 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304324 4848 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304329 4848 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304333 4848 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304336 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304340 4848 feature_gate.go:330] unrecognized feature gate: Example Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304344 4848 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304348 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304351 4848 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304355 4848 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304359 4848 feature_gate.go:330] unrecognized feature gate: 
MetricsCollectionProfiles Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304363 4848 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304368 4848 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304372 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304377 4848 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304381 4848 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304386 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304391 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304399 4848 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304404 4848 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304409 4848 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304414 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304418 4848 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304422 4848 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304427 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304430 4848 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304435 4848 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304438 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304442 4848 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304446 4848 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304449 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304453 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304457 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304461 4848 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304466 4848 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304471 4848 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 
12:46:14.304477 4848 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304482 4848 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304488 4848 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304494 4848 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304499 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304504 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304509 4848 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304515 4848 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304521 4848 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304526 4848 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304531 4848 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304537 4848 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304541 4848 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304545 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304550 4848 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304555 4848 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304560 4848 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304565 4848 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.304572 4848 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.305474 4848 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.318162 4848 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.318223 4848 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318345 4848 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 28 12:46:14 
crc kubenswrapper[4848]: W0128 12:46:14.318357 4848 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318363 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318368 4848 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318374 4848 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318379 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318384 4848 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318388 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318393 4848 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318397 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318401 4848 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318405 4848 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318410 4848 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318414 4848 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318420 4848 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318429 4848 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318434 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318439 4848 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318445 4848 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318449 4848 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318453 4848 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318458 4848 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318462 4848 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318467 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318475 4848 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318481 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318487 4848 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318493 4848 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318499 4848 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318505 4848 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318512 4848 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318518 4848 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318523 4848 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318528 4848 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318534 4848 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318539 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318544 4848 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318549 4848 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318554 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318559 4848 feature_gate.go:330] unrecognized feature gate: Example
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318564 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318569 4848 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318573 4848 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318578 4848 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318583 4848 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318590 4848 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318595 4848 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318600 4848 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318606 4848 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318612 4848 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318619 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318624 4848 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318629 4848 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318634 4848 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318639 4848 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318643 4848 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318648 4848 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318653 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318658 4848 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318665 4848 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318671 4848 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318677 4848 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318683 4848 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318688 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318693 4848 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318699 4848 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318704 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318709 4848 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318715 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318720 4848 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318727 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.318741 4848 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318937 4848 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318946 4848 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318950 4848 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318954 4848 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318958 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318962 4848 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318965 4848 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318968 4848 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318972 4848 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318977 4848 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318985 4848 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.318992 4848 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319002 4848 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319008 4848 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319012 4848 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319016 4848 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319024 4848 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319028 4848 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319034 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319038 4848 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319042 4848 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319046 4848 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319050 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319055 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319059 4848 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319064 4848 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319068 4848 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319072 4848 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319076 4848 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319080 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319084 4848 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319089 4848 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319094 4848 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319099 4848 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319106 4848 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319109 4848 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319114 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319117 4848 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319121 4848 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319124 4848 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319128 4848 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319131 4848 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319135 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319140 4848 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319144 4848 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319153 4848 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319161 4848 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319167 4848 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319172 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319176 4848 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319181 4848 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319185 4848 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319190 4848 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319194 4848 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319198 4848 feature_gate.go:330] unrecognized feature gate: Example
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319203 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319207 4848 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319212 4848 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319217 4848 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319222 4848 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319229 4848 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319235 4848 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319240 4848 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319245 4848 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319249 4848 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319253 4848 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319262 4848 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319283 4848 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319287 4848 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319292 4848 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.319298 4848 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.319305 4848 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.319508 4848 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.329344 4848 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.329526 4848 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.331787 4848 server.go:997] "Starting client certificate rotation"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.331825 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.332679 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-26 05:47:04.001966019 +0000 UTC
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.332778 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.443368 4848 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.445674 4848 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.446146 4848 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.492597 4848 log.go:25] "Validated CRI v1 runtime API"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.576358 4848 log.go:25] "Validated CRI v1 image API"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.579985 4848 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.588965 4848 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-28-12-40-09-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.589018 4848 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:44 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.608304 4848 manager.go:217] Machine: {Timestamp:2026-01-28 12:46:14.605565916 +0000 UTC m=+1.517782984 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:e0f42f58-1276-4f22-b2e0-2ee1470a6c7e BootID:c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:44 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:f9:63:2a Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:f9:63:2a Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:57:05:4a Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:f8:f0:11 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:d6:9a:5c Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:d0:42:47 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:d2:72:b5:25:ad:63 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:6e:cb:93:5a:76:f0 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.608646 4848 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.608833 4848 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.610480 4848 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.610803 4848 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.610853 4848 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.611125 4848 topology_manager.go:138] "Creating topology manager with none policy"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.611140 4848 container_manager_linux.go:303] "Creating device plugin manager"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.611612 4848 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.611660 4848 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.620462 4848 state_mem.go:36] "Initialized new in-memory state store"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.620743 4848 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.624782 4848 kubelet.go:418] "Attempting to sync node with API server"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.624853 4848 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.624950 4848 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.624977 4848 kubelet.go:324] "Adding apiserver pod source"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.625021 4848 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.629511 4848 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.630364 4848 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.631879 4848 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.633964 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.633981 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.634082 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634019 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.634119 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634163 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634173 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634180 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634193 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634201 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634208 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634220 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634229 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634237 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634248 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634258 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634294 4848 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.634900 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.635051 4848 server.go:1280] "Started kubelet"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.655768 4848 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.655742 4848 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.656782 4848 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 28 12:46:14 crc systemd[1]: Started Kubernetes Kubelet.
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.751993 4848 server.go:460] "Adding debug handlers to kubelet server"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.753352 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.753443 4848 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.753510 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 07:54:38.297236486 +0000 UTC
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.753597 4848 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.753640 4848 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.753681 4848 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.753902 4848 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.754658 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.754765 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.756008 4848 factory.go:55] Registering systemd factory
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.756046 4848 factory.go:221] Registration of the systemd container factory successfully
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.760764 4848 factory.go:153] Registering CRI-O factory
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.760924 4848 factory.go:221] Registration of the crio container factory successfully
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.761088 4848 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.761189 4848 factory.go:103] Registering Raw factory
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.761323 4848 manager.go:1196] Started watching for new ooms in manager
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.763050 4848 manager.go:319] Starting recovery of all containers
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.763995 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="200ms"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772253 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772379 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772403 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772416 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772432 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772449 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772465 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772487 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772511 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772527 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772541 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772556 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772581 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772605 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772687 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772711 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772730 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772750 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772771 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772790 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772809 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772839 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772860 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772878 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772892 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772908 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772927 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772950 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772964 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772979 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.772997 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.773020 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.773034 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.773049 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.773071 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.774140 4848 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.138:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ee5cd8f187026 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:46:14.634983462 +0000 UTC m=+1.547200500,LastTimestamp:2026-01-28 12:46:14.634983462 +0000 UTC m=+1.547200500,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.776774 4848 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" 
deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.776946 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777048 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777147 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777292 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777411 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777510 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777832 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.777959 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778040 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778468 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778560 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778642 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778707 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778811 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778873 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.778938 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779008 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779094 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779163 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779235 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779334 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779407 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779474 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779548 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779615 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779691 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779759 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779831 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.779937 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.780046 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.780294 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.780397 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.780471 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.780979 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.781134 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.781180 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.781199 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.781217 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.781247 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.781264 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782301 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782445 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782477 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782517 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782545 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782574 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.782925 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783011 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783047 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783110 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783137 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783154 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783174 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783198 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783583 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" 
volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783631 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783664 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783775 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.783920 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784015 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784144 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784203 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784266 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784314 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784337 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784408 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" 
volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784436 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784457 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784485 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.784976 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785197 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785362 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785399 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785442 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785478 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785566 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785614 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" 
volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785652 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785690 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785718 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785737 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785756 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785783 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785802 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785830 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785871 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785890 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785935 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785956 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.785981 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786000 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786019 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786056 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786078 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786097 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786121 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786139 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786161 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786206 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786223 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786253 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786288 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786309 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786335 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786354 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786378 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786397 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786426 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786456 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786476 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786499 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786517 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786542 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786560 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786577 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786599 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786618 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786640 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786664 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786745 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786773 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786803 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786822 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786848 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786870 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786895 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786915 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786935 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.786961 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787006 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787036 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787056 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787082 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787104 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787124 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787158 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787178 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787205 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787229 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787248 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787382 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787636 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787661 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787689 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787706 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787725 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787755 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787775 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787808 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787825 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787840 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787859 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787877 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787905 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787920 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787945 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787968 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.787984 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788008 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788025 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788040 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788056 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788082 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788103 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788119 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788138 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788175 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788192 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788213 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788230 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788247 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788268 4848 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788297 4848 reconstruct.go:97] "Volume reconstruction finished" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788308 4848 reconciler.go:26] "Reconciler: start to sync state" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.788798 4848 manager.go:324] Recovery completed Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.801152 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.803049 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.803087 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.803096 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.804003 4848 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.804030 
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.804060 4848 state_mem.go:36] "Initialized new in-memory state store"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.845778 4848 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.848626 4848 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.848672 4848 status_manager.go:217] "Starting to sync pod status with apiserver"
Jan 28 12:46:14 crc kubenswrapper[4848]: I0128 12:46:14.848717 4848 kubelet.go:2335] "Starting kubelet main sync loop"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.848778 4848 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Jan 28 12:46:14 crc kubenswrapper[4848]: W0128 12:46:14.849765 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.849882 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.854184 4848 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.949905 4848 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.954467 4848 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 12:46:14 crc kubenswrapper[4848]: E0128 12:46:14.965615 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="400ms"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.052739 4848 policy_none.go:49] "None policy: Start"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.054162 4848 memory_manager.go:170] "Starting memorymanager" policy="None"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.054225 4848 state_mem.go:35] "Initializing new in-memory state store"
Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.055374 4848 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.128910 4848 manager.go:334] "Starting Device Plugin manager"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.128987 4848 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.129004 4848 server.go:79] "Starting device plugin registration server"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.129538 4848 eviction_manager.go:189] "Eviction manager: starting control loop"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.129596 4848 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.130061 4848 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.130197 4848 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.130231 4848 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.139757 4848 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.151015 4848 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"]
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.151185 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.152687 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.152747 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.152762 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.152986 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.154410 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.154467 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.154483 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.156149 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.156239 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.156276 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.156293 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.156193 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157153 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157193 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157206 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157700 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157757 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157771 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157929 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.157988 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.158012 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.158254 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.158404 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.158454 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159260 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159313 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159219 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159377 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159390 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159328 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159694 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159857 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.159895 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160377 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160424 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160522 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160558 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160572 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160660 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.160704 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.162460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.162501 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.162520 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.195886 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.196725 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.196856 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.196897 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.230657 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.233523 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.233584 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.233598 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.233643 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.234545 4848 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.138:6443: connect: connection refused" node="crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298634 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298690 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298713 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298730 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298800 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298831 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298848 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298867 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298883 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298898 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298921 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298934 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298950 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298966 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.298981 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.299056 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.299115 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.299201 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.299519 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.367014 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="800ms"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.399926 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400059 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400128 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400204 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400229 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400279 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400308 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400329 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400332 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400331 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400419 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400454 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400466 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400532 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400551 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400417 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400446 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400462 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400551 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400477 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400743 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.400841 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.435192 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.436578 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.436661 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.436685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.436713 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.437204 4848 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.138:6443: connect: connection refused" node="crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.495536 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.524524 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.543612 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.569722 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.577409 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.609329 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-4bfcc5a43cee4d56742a014b04f48477156b581db2cb9c01981997e97c1ddeee WatchSource:0}: Error finding container 4bfcc5a43cee4d56742a014b04f48477156b581db2cb9c01981997e97c1ddeee: Status 404 returned error can't find the container with id 4bfcc5a43cee4d56742a014b04f48477156b581db2cb9c01981997e97c1ddeee Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.610790 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-c9180103542f54809bcd7ec1cee529b4926c5dffa6617251a0651bd2ddb62215 WatchSource:0}: Error finding container c9180103542f54809bcd7ec1cee529b4926c5dffa6617251a0651bd2ddb62215: Status 404 returned error can't find the container with id c9180103542f54809bcd7ec1cee529b4926c5dffa6617251a0651bd2ddb62215 Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.614899 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-77d1b097a97bed4c310a20597d49071cd62abb2c7661f65f4b575fdffac77384 WatchSource:0}: Error finding container 77d1b097a97bed4c310a20597d49071cd62abb2c7661f65f4b575fdffac77384: Status 404 returned error can't find the container with id 77d1b097a97bed4c310a20597d49071cd62abb2c7661f65f4b575fdffac77384 Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.636583 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.639103 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-4c52e651cb52c0b245306007fe5ae7500e3d25a339bd3dd2eab4f3e7a14ddf0e WatchSource:0}: Error finding container 4c52e651cb52c0b245306007fe5ae7500e3d25a339bd3dd2eab4f3e7a14ddf0e: Status 404 returned error can't find the container with id 4c52e651cb52c0b245306007fe5ae7500e3d25a339bd3dd2eab4f3e7a14ddf0e Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.647550 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-2a1a069a54af18d69c5855465283914de5d968d194100bd4fd7680e231b541c6 WatchSource:0}: Error finding container 2a1a069a54af18d69c5855465283914de5d968d194100bd4fd7680e231b541c6: Status 404 returned error can't find the container with id 2a1a069a54af18d69c5855465283914de5d968d194100bd4fd7680e231b541c6 Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.740697 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.740809 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: 
Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.754447 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 13:03:42.178025689 +0000 UTC Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.837586 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.839003 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.839046 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.839058 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.839086 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 12:46:15.839792 4848 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.138:6443: connect: connection refused" node="crc" Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.853911 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2a1a069a54af18d69c5855465283914de5d968d194100bd4fd7680e231b541c6"} Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.855120 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4c52e651cb52c0b245306007fe5ae7500e3d25a339bd3dd2eab4f3e7a14ddf0e"} Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.856117 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"77d1b097a97bed4c310a20597d49071cd62abb2c7661f65f4b575fdffac77384"} Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.856878 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c9180103542f54809bcd7ec1cee529b4926c5dffa6617251a0651bd2ddb62215"} Jan 28 12:46:15 crc kubenswrapper[4848]: I0128 12:46:15.857636 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"4bfcc5a43cee4d56742a014b04f48477156b581db2cb9c01981997e97c1ddeee"} Jan 28 12:46:15 crc kubenswrapper[4848]: W0128 12:46:15.968547 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:15 crc kubenswrapper[4848]: E0128 
12:46:15.968674 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:16 crc kubenswrapper[4848]: W0128 12:46:16.013488 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:16 crc kubenswrapper[4848]: E0128 12:46:16.013590 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:16 crc kubenswrapper[4848]: W0128 12:46:16.034176 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:16 crc kubenswrapper[4848]: E0128 12:46:16.034308 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:16 crc kubenswrapper[4848]: E0128 12:46:16.169022 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="1.6s" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.608882 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 12:46:16 crc kubenswrapper[4848]: E0128 12:46:16.610088 4848 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.636840 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.640884 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.642522 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.642587 4848 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.642603 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.642646 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:46:16 crc kubenswrapper[4848]: E0128 12:46:16.651903 4848 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.138:6443: connect: connection refused" node="crc" Jan 28 12:46:16 crc kubenswrapper[4848]: I0128 12:46:16.755073 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 09:24:22.796671659 +0000 UTC Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.636258 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:17 crc kubenswrapper[4848]: W0128 12:46:17.645964 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:17 crc kubenswrapper[4848]: E0128 12:46:17.646024 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.755650 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 09:10:12.692687262 +0000 UTC Jan 28 12:46:17 crc kubenswrapper[4848]: E0128 12:46:17.770739 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="3.2s" Jan 28 12:46:17 crc kubenswrapper[4848]: W0128 12:46:17.825403 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:17 crc kubenswrapper[4848]: E0128 12:46:17.825494 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.871038 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287"} Jan 28 12:46:17 crc 
kubenswrapper[4848]: I0128 12:46:17.871096 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04"} Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.872448 4848 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf" exitCode=0 Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.872670 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf"} Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.872822 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.874135 4848 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9cc8ab200f6ec430d32f01ba483afd3d32ee95ccaec2d42e751981db025d1bd7" exitCode=0 Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.874186 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9cc8ab200f6ec430d32f01ba483afd3d32ee95ccaec2d42e751981db025d1bd7"} Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.874299 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.874760 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.874788 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.874800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.875019 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.875057 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.875069 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.875473 4848 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7" exitCode=0 Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.875528 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7"} Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.875606 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:17 crc 
kubenswrapper[4848]: I0128 12:46:17.876299 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.876338 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.876351 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.877108 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.878360 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.878388 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.878401 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.879506 4848 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="a2bef22236e7e911b4f1c8fbf9cc9019faf289a6b921b53d812985add81351f2" exitCode=0 Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.879643 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"a2bef22236e7e911b4f1c8fbf9cc9019faf289a6b921b53d812985add81351f2"} Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.879764 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.881533 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.881578 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:17 crc kubenswrapper[4848]: I0128 12:46:17.881592 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:17 crc kubenswrapper[4848]: W0128 12:46:17.908499 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:17 crc kubenswrapper[4848]: E0128 12:46:17.908618 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.255020 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.256493 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 
12:46:18.256538 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.256555 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.256587 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:46:18 crc kubenswrapper[4848]: E0128 12:46:18.257060 4848 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.138:6443: connect: connection refused" node="crc" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.635972 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:18 crc kubenswrapper[4848]: W0128 12:46:18.717338 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:18 crc kubenswrapper[4848]: E0128 12:46:18.717433 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.756551 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 14:27:58.132336464 +0000 UTC Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.886674 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"8d33d1ff4f0a88830d80480831dfdf215cb895a234f1bd099c76e6c2823a74bf"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.886812 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.888678 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.888729 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.888743 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.891558 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.891604 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.891665 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.894864 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.894916 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.894932 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.898756 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.898834 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.901535 4848 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d92adbec79956d4989bb5be6ffac42f6f92ab8d1c07cb43ba6a0f2c46b730c09" exitCode=0 Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.901642 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d92adbec79956d4989bb5be6ffac42f6f92ab8d1c07cb43ba6a0f2c46b730c09"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.901787 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.902865 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.902907 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.902921 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.912617 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5"} Jan 28 12:46:18 crc kubenswrapper[4848]: I0128 12:46:18.912694 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69"} Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.636575 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.757563 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 12:47:15.213530697 +0000 UTC Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.918845 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056"} Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.918963 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.920394 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.920467 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.920490 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.923576 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"15112daadbe80ad48a85787c8ea4fd35f02f596f60a0dc8b1b5503824f927d30"} Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.923619 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9"} Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.923634 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d"} Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.923674 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.924731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.924769 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.924784 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.926608 4848 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ff4d727fd9b65ceeb25a102495e415cd1aa9cc5f1ac64a6d01b9c186046a6be5" exitCode=0 Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.926743 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.927292 4848 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.927635 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ff4d727fd9b65ceeb25a102495e415cd1aa9cc5f1ac64a6d01b9c186046a6be5"} Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.927728 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928358 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928403 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928407 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928431 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928443 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928400 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928582 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:19 crc kubenswrapper[4848]: I0128 12:46:19.928611 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:20 crc kubenswrapper[4848]: E0128 12:46:20.034896 4848 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.138:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ee5cd8f187026 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:46:14.634983462 +0000 UTC m=+1.547200500,LastTimestamp:2026-01-28 12:46:14.634983462 +0000 UTC m=+1.547200500,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.477212 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.637443 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.709896 4848 certificate_manager.go:356] 
kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 12:46:20 crc kubenswrapper[4848]: E0128 12:46:20.711312 4848 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.758739 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 20:10:40.307961301 +0000 UTC Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.934902 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.934971 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.934990 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.935024 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.935065 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.934977 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f017584791bda02800088ab9675b9d17a06c2be89d97e3f0409d50531328b9e4"} Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936452 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936490 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936506 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936521 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936550 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936562 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936524 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936524 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:20 crc kubenswrapper[4848]: I0128 12:46:20.936685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:20 crc kubenswrapper[4848]: E0128 12:46:20.971920 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="6.4s" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.458032 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.459713 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.459763 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.459777 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.459815 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:46:21 crc kubenswrapper[4848]: E0128 12:46:21.460503 4848 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.138:6443: connect: connection refused" node="crc" Jan 28 12:46:21 crc kubenswrapper[4848]: W0128 12:46:21.547453 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:21 crc kubenswrapper[4848]: E0128 12:46:21.547638 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.636758 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.759079 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 18:54:46.586402277 +0000 UTC Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.941811 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.944190 4848 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="15112daadbe80ad48a85787c8ea4fd35f02f596f60a0dc8b1b5503824f927d30" exitCode=255 Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.944323 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"15112daadbe80ad48a85787c8ea4fd35f02f596f60a0dc8b1b5503824f927d30"} Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.944496 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:21 crc 
kubenswrapper[4848]: I0128 12:46:21.945798 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.945843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.945856 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.946488 4848 scope.go:117] "RemoveContainer" containerID="15112daadbe80ad48a85787c8ea4fd35f02f596f60a0dc8b1b5503824f927d30" Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.947994 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ec5526a8a825a1ce3c01833a3ac46e930eb60495e4622c63d1ffd3d8b887878c"} Jan 28 12:46:21 crc kubenswrapper[4848]: I0128 12:46:21.948029 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"40c6e6e54b8cc68cc72932847202f6916e84ba41ccea89a622f7afa6a1a57220"} Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.496074 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.496299 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.497431 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.497465 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.497479 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:22 crc kubenswrapper[4848]: W0128 12:46:22.634163 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:22 crc kubenswrapper[4848]: E0128 12:46:22.634695 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.138:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.635568 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.138:6443: connect: connection refused Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.759509 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 16:32:09.168732227 +0000 UTC Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.951922 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.953937 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3"} Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.954142 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.954203 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.955321 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.955359 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.955371 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.959593 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"68c923fbacebc07e36ab0de094b6cf1892d4284263d635217a53df57b7060087"} Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.959650 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a0e20d68d5c1e18677d2fce5108736556550c78d491104b82b87eba24b913b12"} Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.959754 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.960670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.960714 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:22 crc kubenswrapper[4848]: I0128 12:46:22.960730 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.725051 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.725294 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.726798 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.726850 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.726861 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.770593 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: 
Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 04:24:30.939637275 +0000 UTC Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.941003 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.961753 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.961806 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.961801 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.962958 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.963018 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.963031 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.963340 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.963374 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:23 crc kubenswrapper[4848]: I0128 12:46:23.963385 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.294076 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.539121 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.770856 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 17:00:36.558786589 +0000 UTC Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.805826 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.806057 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.807834 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.807953 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.807971 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.825319 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:24 crc kubenswrapper[4848]: 
I0128 12:46:24.964218 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.964509 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.965359 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.965409 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.965423 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.966539 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.966577 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:24 crc kubenswrapper[4848]: I0128 12:46:24.966587 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:25 crc kubenswrapper[4848]: E0128 12:46:25.139915 4848 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.718113 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.718354 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.719800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.719844 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.719855 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.771787 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 06:24:18.578640571 +0000 UTC Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.968508 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.969618 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.969670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:25 crc kubenswrapper[4848]: I0128 12:46:25.969685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:26 crc kubenswrapper[4848]: I0128 12:46:26.772481 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 05:49:14.328866606 +0000 UTC Jan 28 12:46:27 crc 
kubenswrapper[4848]: I0128 12:46:27.773308 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 21:05:27.627752658 +0000 UTC Jan 28 12:46:27 crc kubenswrapper[4848]: I0128 12:46:27.860831 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:27 crc kubenswrapper[4848]: I0128 12:46:27.862522 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:27 crc kubenswrapper[4848]: I0128 12:46:27.862600 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:27 crc kubenswrapper[4848]: I0128 12:46:27.862614 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:27 crc kubenswrapper[4848]: I0128 12:46:27.862650 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.222785 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.223006 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.224297 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.224333 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.224342 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.227478 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.396772 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.396991 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.398420 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.398464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.398477 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.775337 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 03:55:52.269114073 +0000 UTC Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.985145 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.986073 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.986133 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:28 crc kubenswrapper[4848]: I0128 12:46:28.986145 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:29 crc kubenswrapper[4848]: I0128 12:46:29.344504 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 12:46:29 crc kubenswrapper[4848]: I0128 12:46:29.776035 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-06 20:29:39.038876516 +0000 UTC Jan 28 12:46:30 crc kubenswrapper[4848]: I0128 12:46:30.776364 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-06 21:59:01.971452911 +0000 UTC Jan 28 12:46:31 crc kubenswrapper[4848]: I0128 12:46:31.222793 4848 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 12:46:31 crc kubenswrapper[4848]: I0128 12:46:31.222894 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 12:46:31 crc kubenswrapper[4848]: I0128 12:46:31.777688 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 02:32:37.755501604 +0000 UTC Jan 28 12:46:32 crc kubenswrapper[4848]: I0128 12:46:32.778850 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 23:49:47.043232816 +0000 UTC Jan 28 12:46:33 crc kubenswrapper[4848]: I0128 12:46:33.636869 4848 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 28 12:46:33 crc kubenswrapper[4848]: I0128 12:46:33.779578 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 00:40:31.695743329 +0000 UTC Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.003533 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.004098 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.006523 4848 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" exitCode=255 Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.006595 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3"} Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.006682 4848 scope.go:117] "RemoveContainer" containerID="15112daadbe80ad48a85787c8ea4fd35f02f596f60a0dc8b1b5503824f927d30" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.006825 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.007834 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.007872 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.007886 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.008576 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" Jan 28 12:46:34 crc kubenswrapper[4848]: E0128 12:46:34.008784 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:46:34 crc kubenswrapper[4848]: W0128 12:46:34.087278 4848 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.087670 4848 trace.go:236] Trace[1302419493]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 12:46:24.085) (total time: 10002ms): Jan 28 12:46:34 crc kubenswrapper[4848]: Trace[1302419493]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (12:46:34.087) Jan 28 12:46:34 crc kubenswrapper[4848]: Trace[1302419493]: [10.002111615s] [10.002111615s] END Jan 28 12:46:34 crc kubenswrapper[4848]: E0128 12:46:34.087700 4848 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.328412 4848 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User 
\"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.328503 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.333877 4848 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.333956 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.780616 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 16:45:16.974876614 +0000 UTC Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.794648 4848 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]log ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]etcd ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/openshift.io-api-request-count-filter ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/openshift.io-startkubeinformers ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/generic-apiserver-start-informers ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/priority-and-fairness-config-consumer ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/priority-and-fairness-filter ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-apiextensions-informers ok Jan 28 12:46:34 crc kubenswrapper[4848]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld Jan 28 12:46:34 crc kubenswrapper[4848]: [-]poststarthook/crd-informer-synced failed: reason withheld Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-system-namespaces-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-cluster-authentication-info-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Jan 28 12:46:34 crc 
kubenswrapper[4848]: [+]poststarthook/start-legacy-token-tracking-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-service-ip-repair-controllers ok Jan 28 12:46:34 crc kubenswrapper[4848]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Jan 28 12:46:34 crc kubenswrapper[4848]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/priority-and-fairness-config-producer ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/bootstrap-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/start-kube-aggregator-informers ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/apiservice-status-local-available-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/apiservice-status-remote-available-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [-]poststarthook/apiservice-registration-controller failed: reason withheld Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/apiservice-wait-for-first-sync ok Jan 28 12:46:34 crc kubenswrapper[4848]: [-]poststarthook/apiservice-discovery-controller failed: reason withheld Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/kube-apiserver-autoregistration ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]autoregister-completion ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/apiservice-openapi-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: [+]poststarthook/apiservice-openapiv3-controller ok Jan 28 12:46:34 crc kubenswrapper[4848]: livez check failed Jan 28 12:46:34 crc kubenswrapper[4848]: I0128 12:46:34.794734 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:46:35 crc kubenswrapper[4848]: I0128 12:46:35.011221 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:46:35 crc kubenswrapper[4848]: E0128 12:46:35.140350 4848 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 12:46:35 crc kubenswrapper[4848]: I0128 12:46:35.781218 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 11:14:30.879940147 +0000 UTC Jan 28 12:46:36 crc kubenswrapper[4848]: I0128 12:46:36.782083 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 09:58:12.284679377 +0000 UTC Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.117846 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.118119 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.120036 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.120112 
Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.120112 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.120128 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.120859 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3"
Jan 28 12:46:37 crc kubenswrapper[4848]: E0128 12:46:37.121088 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Jan 28 12:46:37 crc kubenswrapper[4848]: I0128 12:46:37.783170 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 02:47:34.649012236 +0000 UTC
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.440085 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.440334 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.441839 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.441888 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.441900 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.472702 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Jan 28 12:46:38 crc kubenswrapper[4848]: I0128 12:46:38.783981 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 01:24:32.736780064 +0000 UTC
Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.023126 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.024411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.024464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.024477 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.320779 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s"
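
Editor's note: the "back-off 10s restarting failed container ... CrashLoopBackOff" messages above reflect the kubelet's per-container restart backoff. A minimal sketch of that doubling schedule, assuming the upstream kubelet defaults of a 10s initial delay, factor 2, and a 5-minute cap (these values are an assumption from upstream sources, not read from this log):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Assumed defaults: initial 10s, doubling, capped at 5m.
        backoff, maxBackoff := 10*time.Second, 5*time.Minute
        for restart := 1; restart <= 7; restart++ {
            fmt.Printf("restart %d: back-off %s\n", restart, backoff)
            backoff *= 2
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }
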
err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.330367 4848 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.330537 4848 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.331134 4848 trace.go:236] Trace[1215520902]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 12:46:24.744) (total time: 14586ms): Jan 28 12:46:39 crc kubenswrapper[4848]: Trace[1215520902]: ---"Objects listed" error: 14586ms (12:46:39.330) Jan 28 12:46:39 crc kubenswrapper[4848]: Trace[1215520902]: [14.586167236s] [14.586167236s] END Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.331166 4848 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.336474 4848 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.361452 4848 csr.go:261] certificate signing request csr-5hbbq is approved, waiting to be issued Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.371527 4848 csr.go:257] certificate signing request csr-5hbbq is issued Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.545331 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.555415 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.572172 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.572437 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.638751 4848 apiserver.go:52] "Watching apiserver" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.641573 4848 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.641866 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.642222 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.642414 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.642483 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.642518 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.642641 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.642421 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.642745 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.642824 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.642896 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.644838 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.645152 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.645236 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.646459 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.646576 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.646608 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.647438 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.647478 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.648044 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.654800 4848 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.676885 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.689101 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.703713 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.718072 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.733170 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.733618 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.733545 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.733714 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.733741 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734016 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
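
Editor's note: every status patch above fails with "dial tcp 127.0.0.1:9743: connect: connection refused" because the network-node-identity webhook that admits pod status updates is itself among the pods still being recreated. A minimal reachability sketch for that endpoint; the TCP dial plus InsecureSkipVerify TLS handshake is an illustrative diagnostic, not what the apiserver's webhook client does:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Endpoint taken from the failed webhook Posts in the log above.
        conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
        if err != nil {
            fmt.Println("webhook endpoint down:", err) // e.g. connect: connection refused
            return
        }
        // Assumption: handshake only to inspect the serving cert, no client auth.
        tlsConn := tls.Client(conn, &tls.Config{InsecureSkipVerify: true})
        if err := tlsConn.Handshake(); err != nil {
            fmt.Println("TLS handshake failed:", err)
        } else {
            fmt.Println("webhook serving cert:", tlsConn.ConnectionState().PeerCertificates[0].Subject)
        }
        tlsConn.Close()
    }
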
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734043 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734081 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734192 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734241 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734291 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734316 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734344 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734361 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734378 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734393 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: 
\"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734408 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734424 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734439 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734454 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734473 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734490 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734505 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734523 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734541 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734539 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734556 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734574 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734594 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734610 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734626 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734642 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734658 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734675 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734692 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734711 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734736 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734749 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734756 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734765 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734811 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734842 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734872 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734898 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734922 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734932 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734948 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734977 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.734982 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735006 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735029 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735053 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735081 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735094 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735106 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735136 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735278 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735315 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735467 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735480 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735644 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735697 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735691 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735777 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735851 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735882 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735936 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735987 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.735988 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736067 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736135 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736329 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736378 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736472 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736527 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736580 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736689 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736870 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.736880 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737005 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737077 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737136 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737124 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737184 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737402 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737397 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737434 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737457 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737463 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737482 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737511 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737541 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737562 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737582 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737603 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737622 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737644 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737665 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737666 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737686 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737709 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737731 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737752 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737770 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737794 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737816 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737838 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737862 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737887 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737910 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737931 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737953 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737973 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.737993 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738037 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738054 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738071 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738095 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738112 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" 
(UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738132 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738150 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738169 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738189 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738209 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738231 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738270 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738294 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738316 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738338 4848 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738357 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738377 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738396 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738415 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738432 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738451 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738468 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738486 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738505 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 
12:46:39.738524 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738542 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738563 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738586 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738608 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738631 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738653 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738662 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738678 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738706 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738732 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738739 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738755 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738732 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738782 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738814 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738839 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738860 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738944 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.738975 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739040 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 
12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739054 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739067 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739091 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739110 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739127 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739145 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739165 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739215 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739232 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739264 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739281 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739300 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739318 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739158 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739361 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739335 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739550 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739650 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739785 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739842 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739947 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739975 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740387 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740495 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740489 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740691 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740721 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740747 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740866 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.740874 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.741095 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.741222 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.741311 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.741359 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.741651 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.741864 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742021 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742327 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742410 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742438 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.739339 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742486 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742505 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742670 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742704 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742727 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742734 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742748 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742774 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742795 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742813 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742831 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742847 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742865 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742883 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742901 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742919 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742938 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742958 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742968 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.742985 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743019 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743042 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743059 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743076 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743092 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743110 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.743126 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744412 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744436 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744457 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744476 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744495 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744515 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744536 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744564 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 
12:46:39.744596 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744623 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744647 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744671 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744693 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744715 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744748 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744775 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744804 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744830 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744853 4848 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744882 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744913 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744938 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744961 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744983 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745005 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745030 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745053 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745075 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 
12:46:39.745101 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745124 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745150 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745173 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745198 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745222 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745268 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745294 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745317 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745343 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod 
\"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745367 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745391 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745414 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745472 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745505 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745538 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745568 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745594 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745621 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod 
\"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745651 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745682 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745710 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745739 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745764 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745792 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745817 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745855 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: 
I0128 12:46:39.745972 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745989 4848 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746005 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746016 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746028 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746038 4848 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746047 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746058 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746070 4848 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746081 4848 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746092 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746102 4848 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746112 4848 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") 
on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746122 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746134 4848 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746146 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746158 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746167 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746177 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746186 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746197 4848 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746209 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746219 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746228 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746238 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746270 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc 
kubenswrapper[4848]: I0128 12:46:39.746283 4848 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746293 4848 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746305 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746320 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746334 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746346 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746356 4848 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746365 4848 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746374 4848 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746383 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746393 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746401 4848 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746410 4848 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746419 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746430 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746439 4848 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746447 4848 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746456 4848 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746465 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746473 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746527 4848 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746536 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746565 4848 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746575 4848 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746584 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746592 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746601 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746612 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746621 4848 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746631 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746644 4848 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746654 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746664 4848 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746673 4848 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746682 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746691 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746700 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746710 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746719 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.755546 4848 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.755807 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.757489 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744641 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744733 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744713 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744714 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744901 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.744923 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745154 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745331 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.745912 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746054 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746123 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746274 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746369 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746497 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746687 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.746987 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747030 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747093 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.747237 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:46:40.247183355 +0000 UTC m=+27.159400393 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.760006 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",
\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.760459 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.760662 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
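
[Editor's note] The long "Failed to update status for pod" entry above shows kubelet's status manager trying to PATCH the status subresource of kube-apiserver-crc and being rejected, not by the API server itself but by the pod.network-node-identity.openshift.io admission webhook, whose endpoint at 127.0.0.1:9743 refuses connections; the network-node-identity pod's own volumes are being remounted elsewhere in this same log, so the refusal is expected during startup and the status manager simply retries on a later sync. A minimal client-go sketch of the same kind of status patch follows, assuming a reachable cluster and a default kubeconfig; this is illustrative, not kubelet's actual code path.

```go
// Illustrative client-go sketch of patching a pod's status subresource,
// the operation that fails in the log above when an admission webhook is
// unreachable. Assumes a default kubeconfig; not kubelet's actual code.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// A strategic-merge patch against the "status" subresource; a failing
	// webhook surfaces here as an Internal error, exactly as logged above.
	patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"False","reason":"ContainersNotReady"}]}}`)
	_, err = client.CoreV1().Pods("openshift-kube-apiserver").Patch(
		context.TODO(), "kube-apiserver-crc",
		types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	if err != nil {
		fmt.Println("failed to patch status:", err) // kubelet logs this and retries later
	}
}
```
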
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.761495 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747343 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747442 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747531 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747594 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747723 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747898 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747934 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.747946 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748230 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748261 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748230 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748452 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748560 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748641 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748734 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748755 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748910 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.748904 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749046 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749059 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749069 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749080 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749297 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749379 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749620 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749854 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.749951 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750109 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750127 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750256 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750290 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750503 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750554 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.750823 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751096 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751286 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.751297 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.761993 4848 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751330 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751373 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751596 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751633 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.751939 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.752398 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753067 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753198 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753438 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753509 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753529 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753704 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753820 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.753908 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754069 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754067 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754116 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754360 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754432 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754513 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754624 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.754837 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.755915 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.762293 4848 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.755362 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.756383 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.756731 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.756775 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.756815 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.758787 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.758802 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.759474 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.759533 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.759761 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.762415 4848 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.762436 4848 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.762455 4848 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.762474 4848 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.762494 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.762632 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:40.262596236 +0000 UTC m=+27.174813274 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.762683 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:40.262671618 +0000 UTC m=+27.174888656 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.764066 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.764292 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.765060 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.765091 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.765098 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.765198 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.770277 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.770515 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.770673 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.770871 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-28 12:46:40.270841947 +0000 UTC m=+27.183059015 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.771128 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.771682 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.771547 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.771675 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.772201 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.772379 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.772600 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.772675 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.772716 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.773295 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.774572 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.777208 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.777614 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.777754 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.777885 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:39 crc kubenswrapper[4848]: E0128 12:46:39.778066 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:40.278041767 +0000 UTC m=+27.190258835 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.779019 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.779497 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.779554 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.779554 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.779697 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.784469 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 07:30:21.972333157 +0000 UTC Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.786301 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.786481 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.786546 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.786828 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.787237 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.787525 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.787832 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.789797 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.790163 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.790403 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.791797 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.792662 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.793155 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.798874 4848 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.805774 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.806216 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.806316 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.812366 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.819999 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.825647 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.833508 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.862020 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.863524 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.863595 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.863802 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864162 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864633 4848 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864660 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864695 4848 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864723 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864743 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864754 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864764 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864775 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864785 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864794 4848 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864823 4848 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864835 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864848 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864856 4848 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.864866 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865058 4848 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865070 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865079 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865088 4848 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865098 4848 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865109 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865119 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865129 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865138 4848 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865148 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865156 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865165 4848 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865173 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865184 4848 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865193 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865203 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865214 4848 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865225 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865234 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865256 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865265 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865273 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: 
\"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865284 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865294 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865302 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865311 4848 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865319 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865333 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865344 4848 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865369 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865380 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865390 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865399 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865408 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865417 4848 reconciler_common.go:293] "Volume detached for volume 
\"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865426 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865436 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865447 4848 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865455 4848 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865464 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865474 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865483 4848 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865492 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865501 4848 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865509 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865519 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865527 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865537 4848 reconciler_common.go:293] 
"Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865547 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865556 4848 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865567 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865576 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865585 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865594 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865602 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865610 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865619 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865627 4848 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865635 4848 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865645 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865653 4848 reconciler_common.go:293] "Volume 
detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865661 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865670 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865678 4848 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865685 4848 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865693 4848 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865701 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865710 4848 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865718 4848 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865727 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865736 4848 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865743 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865751 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865762 4848 reconciler_common.go:293] "Volume 
detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865770 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865779 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865787 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865796 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865805 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865814 4848 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865823 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865831 4848 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865840 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865848 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865856 4848 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865864 4848 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc 
kubenswrapper[4848]: I0128 12:46:39.865872 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865881 4848 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865890 4848 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865898 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865906 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865914 4848 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865923 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865931 4848 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865940 4848 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865950 4848 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865959 4848 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.865969 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866055 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: 
I0128 12:46:39.866074 4848 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866085 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866094 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866107 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866115 4848 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866127 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866138 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.866146 4848 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.872721 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.885948 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.886755 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.895427 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.926232 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.941541 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.957622 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.959647 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.965822 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.972614 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:46:39 crc kubenswrapper[4848]: W0128 12:46:39.997041 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-aed1e7365041dd8a5b8f962f231c90ba40f5b09888facc77499af46335763293 WatchSource:0}: Error finding container aed1e7365041dd8a5b8f962f231c90ba40f5b09888facc77499af46335763293: Status 404 returned error can't find the container with id aed1e7365041dd8a5b8f962f231c90ba40f5b09888facc77499af46335763293 Jan 28 12:46:39 crc kubenswrapper[4848]: I0128 12:46:39.998767 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: W0128 12:46:40.000561 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-db218f40cd42926db295a8fc0e0d9e2ac70a3f7246c76c45bd4099ca87fadcd6 WatchSource:0}: Error finding container db218f40cd42926db295a8fc0e0d9e2ac70a3f7246c76c45bd4099ca87fadcd6: Status 404 returned error can't find the container with id db218f40cd42926db295a8fc0e0d9e2ac70a3f7246c76c45bd4099ca87fadcd6 Jan 28 12:46:40 crc kubenswrapper[4848]: W0128 12:46:40.001168 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-cb7323d8a04eb47aab3707152a02152ed3ea9009e48671ab513129b1893995fd WatchSource:0}: Error finding container cb7323d8a04eb47aab3707152a02152ed3ea9009e48671ab513129b1893995fd: Status 404 returned error can't find the container with id cb7323d8a04eb47aab3707152a02152ed3ea9009e48671ab513129b1893995fd Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.018887 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.027502 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"aed1e7365041dd8a5b8f962f231c90ba40f5b09888facc77499af46335763293"} Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.028492 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"db218f40cd42926db295a8fc0e0d9e2ac70a3f7246c76c45bd4099ca87fadcd6"} Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.030426 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"cb7323d8a04eb47aab3707152a02152ed3ea9009e48671ab513129b1893995fd"} Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.037363 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.039318 4848 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.039738 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.039965 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.053473 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.072070 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.090885 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b8
2799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.108968 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.126448 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.139641 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.154794 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.273145 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.273392 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:46:41.273352573 +0000 UTC m=+28.185569621 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.274546 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.274774 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.274943 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.274715 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.274890 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.275207 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.275223 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.275018 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.275329 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:41.275147713 +0000 UTC m=+28.187364751 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.275350 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:41.275342449 +0000 UTC m=+28.187559687 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.275381 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:41.275374169 +0000 UTC m=+28.187591207 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.373604 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-28 12:41:39 +0000 UTC, rotation deadline is 2026-12-17 20:14:19.275192208 +0000 UTC Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.373721 4848 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7759h27m38.90149211s for next certificate rotation Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.376336 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.376592 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.376627 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.376645 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.376731 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:41.376707442 +0000 UTC m=+28.288924620 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.437440 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-wkg8g"] Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.437897 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.440527 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.440912 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.441799 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.457018 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\
\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.470386 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.477691 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8f36b4b4-c850-44cc-b422-89ce6fe024cd-hosts-file\") pod \"node-resolver-wkg8g\" (UID: \"8f36b4b4-c850-44cc-b422-89ce6fe024cd\") " pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.477748 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr2rq\" (UniqueName: \"kubernetes.io/projected/8f36b4b4-c850-44cc-b422-89ce6fe024cd-kube-api-access-nr2rq\") pod \"node-resolver-wkg8g\" (UID: \"8f36b4b4-c850-44cc-b422-89ce6fe024cd\") " pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.535325 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.555455 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.571382 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.578218 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8f36b4b4-c850-44cc-b422-89ce6fe024cd-hosts-file\") pod \"node-resolver-wkg8g\" (UID: \"8f36b4b4-c850-44cc-b422-89ce6fe024cd\") " pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.578491 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr2rq\" (UniqueName: \"kubernetes.io/projected/8f36b4b4-c850-44cc-b422-89ce6fe024cd-kube-api-access-nr2rq\") pod \"node-resolver-wkg8g\" (UID: \"8f36b4b4-c850-44cc-b422-89ce6fe024cd\") " pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.578446 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8f36b4b4-c850-44cc-b422-89ce6fe024cd-hosts-file\") pod \"node-resolver-wkg8g\" (UID: \"8f36b4b4-c850-44cc-b422-89ce6fe024cd\") " pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.629040 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.635020 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr2rq\" (UniqueName: \"kubernetes.io/projected/8f36b4b4-c850-44cc-b422-89ce6fe024cd-kube-api-access-nr2rq\") pod \"node-resolver-wkg8g\" (UID: \"8f36b4b4-c850-44cc-b422-89ce6fe024cd\") " pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.678429 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.750493 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.755019 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-wkg8g" Jan 28 12:46:40 crc kubenswrapper[4848]: W0128 12:46:40.779577 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f36b4b4_c850_44cc_b422_89ce6fe024cd.slice/crio-41bbd29fc87aa6908cf611dc38d9d4557bc6f28cd51394740411cb8523895c6b WatchSource:0}: Error finding container 41bbd29fc87aa6908cf611dc38d9d4557bc6f28cd51394740411cb8523895c6b: Status 404 returned error can't find the container with id 41bbd29fc87aa6908cf611dc38d9d4557bc6f28cd51394740411cb8523895c6b Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.785852 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 22:48:23.600377337 +0000 UTC Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.794582 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.849861 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:40 crc kubenswrapper[4848]: E0128 12:46:40.850355 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.860509 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.861736 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.863633 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.864568 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.866153 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.866855 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.867842 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.869106 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 28 12:46:40 crc 
kubenswrapper[4848]: I0128 12:46:40.869880 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.871205 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.872102 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.873362 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.875713 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.876308 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.876918 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.878532 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.879159 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.880376 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.881295 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.882088 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.883881 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.884615 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.885243 4848 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.886856 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.887522 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.889031 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.889991 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.892848 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.893674 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.895010 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.895680 4848 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.895841 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.905706 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.906706 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.907225 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.910277 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.912153 4848 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.912887 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.914648 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.915445 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.916710 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.917486 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.919040 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.920387 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.920974 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.921628 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.922869 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.924544 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.925896 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.927277 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.928353 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.929460 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.931364 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.931871 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.993437 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-vfhvz"] Jan 28 12:46:40 crc kubenswrapper[4848]: I0128 12:46:40.993852 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.001498 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.001585 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.001603 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.002670 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.002896 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.015581 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.030328 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.038817 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201"} Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.038870 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b"} Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.043281 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-wkg8g" event={"ID":"8f36b4b4-c850-44cc-b422-89ce6fe024cd","Type":"ContainerStarted","Data":"41bbd29fc87aa6908cf611dc38d9d4557bc6f28cd51394740411cb8523895c6b"} Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.045953 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.052090 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.052305 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.052650 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd"} Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.059291 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.075456 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.087835 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30570a21-e260-4494-89cd-2643cb0ca288-proxy-tls\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.087927 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffq99\" (UniqueName: \"kubernetes.io/projected/30570a21-e260-4494-89cd-2643cb0ca288-kube-api-access-ffq99\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.087963 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/30570a21-e260-4494-89cd-2643cb0ca288-rootfs\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.087988 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/30570a21-e260-4494-89cd-2643cb0ca288-mcd-auth-proxy-config\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.088956 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.103625 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.115315 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.126901 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.139080 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.155324 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.166978 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.182470 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.189469 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffq99\" (UniqueName: \"kubernetes.io/projected/30570a21-e260-4494-89cd-2643cb0ca288-kube-api-access-ffq99\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.189535 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/30570a21-e260-4494-89cd-2643cb0ca288-rootfs\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.189582 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30570a21-e260-4494-89cd-2643cb0ca288-mcd-auth-proxy-config\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.189632 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30570a21-e260-4494-89cd-2643cb0ca288-proxy-tls\") pod \"machine-config-daemon-vfhvz\" (UID: 
\"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.189737 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/30570a21-e260-4494-89cd-2643cb0ca288-rootfs\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.190707 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30570a21-e260-4494-89cd-2643cb0ca288-mcd-auth-proxy-config\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.202073 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30570a21-e260-4494-89cd-2643cb0ca288-proxy-tls\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.203727 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.226954 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.228334 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffq99\" (UniqueName: \"kubernetes.io/projected/30570a21-e260-4494-89cd-2643cb0ca288-kube-api-access-ffq99\") pod \"machine-config-daemon-vfhvz\" (UID: \"30570a21-e260-4494-89cd-2643cb0ca288\") " pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.242670 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"n
ame\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.262802 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.290534 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.290685 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.290730 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.290758 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.290804 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:46:43.290766372 +0000 UTC m=+30.202983420 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.290866 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.290903 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.291008 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.290939 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:43.290918317 +0000 UTC m=+30.203135425 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.291030 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.291048 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.291063 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:43.291033901 +0000 UTC m=+30.203251129 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.291101 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-28 12:46:43.291089402 +0000 UTC m=+30.203306650 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.291868 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.312526 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.325328 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.359385 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.391361 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.391603 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.391633 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered 
Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.391652 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.391706 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:43.391690354 +0000 UTC m=+30.303907382 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.420109 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-bmnpt"] Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.420548 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.420957 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-96648"] Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.421733 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.426932 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.427431 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.431059 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.431488 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.431674 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.431872 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.436866 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.448542 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.473219 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.508528 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/52f51c55-df27-4e41-b7c5-e3d714909803-cni-binary-copy\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.508882 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-socket-dir-parent\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509025 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-cnibin\") pod \"multus-bmnpt\" (UID: 
\"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509157 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-system-cni-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509343 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-cni-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509463 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-k8s-cni-cncf-io\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509559 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-netns\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509652 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-cni-bin\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.509748 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-os-release\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.541038 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.574135 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.592348 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{
\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.603607 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.610951 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fa3c7276-5cf5-47da-afc7-eb68e028f483-cni-binary-copy\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611021 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-tuning-conf-dir\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611044 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/52f51c55-df27-4e41-b7c5-e3d714909803-multus-daemon-config\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611071 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-os-release\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611092 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-conf-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611228 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fa3c7276-5cf5-47da-afc7-eb68e028f483-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611314 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/52f51c55-df27-4e41-b7c5-e3d714909803-cni-binary-copy\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611403 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-socket-dir-parent\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611473 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-cnibin\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611503 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-system-cni-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 
12:46:41.611523 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-system-cni-dir\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611544 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z4fj\" (UniqueName: \"kubernetes.io/projected/fa3c7276-5cf5-47da-afc7-eb68e028f483-kube-api-access-7z4fj\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611691 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-system-cni-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611761 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-socket-dir-parent\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611779 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-cni-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611850 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-cni-multus\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611872 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-multus-certs\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611897 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-netns\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611928 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-kubelet\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611949 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-k8s-cni-cncf-io\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611966 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-netns\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.611974 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-cni-bin\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612008 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-cni-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612021 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-cni-bin\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612010 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-k8s-cni-cncf-io\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612034 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-os-release\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612149 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-cnibin\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612155 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-os-release\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612171 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-etc-kubernetes\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " 
pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612194 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fstpn\" (UniqueName: \"kubernetes.io/projected/52f51c55-df27-4e41-b7c5-e3d714909803-kube-api-access-fstpn\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612212 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-cnibin\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612236 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-hostroot\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.612548 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/52f51c55-df27-4e41-b7c5-e3d714909803-cni-binary-copy\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.616738 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.629305 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.641216 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.654660 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.667138 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.681128 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.695053 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713646 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-etc-kubernetes\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713720 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fstpn\" (UniqueName: \"kubernetes.io/projected/52f51c55-df27-4e41-b7c5-e3d714909803-kube-api-access-fstpn\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713747 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-hostroot\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713808 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/52f51c55-df27-4e41-b7c5-e3d714909803-multus-daemon-config\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713835 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fa3c7276-5cf5-47da-afc7-eb68e028f483-cni-binary-copy\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713858 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-tuning-conf-dir\") pod \"multus-additional-cni-plugins-96648\" (UID: 
\"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713872 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-etc-kubernetes\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713892 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-os-release\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.713968 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-os-release\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714010 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-conf-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714078 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fa3c7276-5cf5-47da-afc7-eb68e028f483-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714115 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-cnibin\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714152 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-system-cni-dir\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714177 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z4fj\" (UniqueName: \"kubernetes.io/projected/fa3c7276-5cf5-47da-afc7-eb68e028f483-kube-api-access-7z4fj\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714210 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-cni-multus\") pod 
\"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714235 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-multus-certs\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714292 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-kubelet\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714345 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-hostroot\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714398 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-kubelet\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714442 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-multus-conf-dir\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.714982 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-system-cni-dir\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.715056 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-run-multus-certs\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.715114 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/52f51c55-df27-4e41-b7c5-e3d714909803-multus-daemon-config\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.715101 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/52f51c55-df27-4e41-b7c5-e3d714909803-host-var-lib-cni-multus\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.715137 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-cnibin\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.715588 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fa3c7276-5cf5-47da-afc7-eb68e028f483-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.715836 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fa3c7276-5cf5-47da-afc7-eb68e028f483-cni-binary-copy\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.716737 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fa3c7276-5cf5-47da-afc7-eb68e028f483-tuning-conf-dir\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.718738 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.728453 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.732611 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fstpn\" (UniqueName: \"kubernetes.io/projected/52f51c55-df27-4e41-b7c5-e3d714909803-kube-api-access-fstpn\") pod \"multus-bmnpt\" (UID: \"52f51c55-df27-4e41-b7c5-e3d714909803\") " pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.733633 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z4fj\" (UniqueName: \"kubernetes.io/projected/fa3c7276-5cf5-47da-afc7-eb68e028f483-kube-api-access-7z4fj\") pod \"multus-additional-cni-plugins-96648\" (UID: \"fa3c7276-5cf5-47da-afc7-eb68e028f483\") " pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.740911 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.743373 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bmnpt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.750597 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-96648" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.758088 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: W0128 12:46:41.765954 4848 manager.go:1169] Failed 
to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52f51c55_df27_4e41_b7c5_e3d714909803.slice/crio-8e07aa7ac093288c50ec5ebaa61c802a8e9b99a2ce24bf3b77736b5858d1996d WatchSource:0}: Error finding container 8e07aa7ac093288c50ec5ebaa61c802a8e9b99a2ce24bf3b77736b5858d1996d: Status 404 returned error can't find the container with id 8e07aa7ac093288c50ec5ebaa61c802a8e9b99a2ce24bf3b77736b5858d1996d Jan 28 12:46:41 crc kubenswrapper[4848]: W0128 12:46:41.767931 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa3c7276_5cf5_47da_afc7_eb68e028f483.slice/crio-52a584f0fc2bef7d67fe533efe89336a7aa9ba06eca57a48c4b36f2868be82d0 WatchSource:0}: Error finding container 52a584f0fc2bef7d67fe533efe89336a7aa9ba06eca57a48c4b36f2868be82d0: Status 404 returned error can't find the container with id 52a584f0fc2bef7d67fe533efe89336a7aa9ba06eca57a48c4b36f2868be82d0 Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.773363 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kube
rnetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.787181 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 07:53:36.988051148 +0000 UTC Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.787315 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.800462 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.816088 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g9vht"] Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.817067 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.819978 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.820138 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.820389 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.820409 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.820543 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.820705 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.821506 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.822129 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.837422 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.849158 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.849329 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.849747 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:41 crc kubenswrapper[4848]: E0128 12:46:41.849813 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.850015 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.861487 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.874438 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.894555 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773
257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.908031 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc 
kubenswrapper[4848]: I0128 12:46:41.916618 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-etc-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916671 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-bin\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916692 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-env-overrides\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916728 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-ovn-kubernetes\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916753 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-kubelet\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916786 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-systemd-units\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916823 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-systemd\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916848 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-netns\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916869 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-node-log\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916891 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-netd\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916917 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916960 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-var-lib-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.916993 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917015 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr5bz\" (UniqueName: \"kubernetes.io/projected/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-kube-api-access-rr5bz\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917059 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovn-node-metrics-cert\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917096 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-script-lib\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917123 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-ovn\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917149 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-log-socket\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917171 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-config\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.917212 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-slash\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.922397 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.933480 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.957301 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:41 crc kubenswrapper[4848]: I0128 12:46:41.972076 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.010572 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018422 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-systemd\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018585 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-netns\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018610 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-node-log\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018612 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-systemd\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018636 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-netd\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018687 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-netd\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018735 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-netns\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018819 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-node-log\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018934 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.018971 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-var-lib-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019236 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019285 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr5bz\" (UniqueName: \"kubernetes.io/projected/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-kube-api-access-rr5bz\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019296 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019312 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-ovn\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019376 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-var-lib-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019392 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-log-socket\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019414 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovn-node-metrics-cert\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019425 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019483 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-script-lib\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019515 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-config\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019564 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-slash\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019590 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-etc-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019609 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-bin\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019626 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-env-overrides\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019655 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-ovn-kubernetes\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht"
Jan 28 12:46:42 crc
kubenswrapper[4848]: I0128 12:46:42.019679 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-kubelet\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019707 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-systemd-units\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019760 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-systemd-units\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019972 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-log-socket\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.019357 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-ovn\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.020035 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-bin\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.020083 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-etc-openvswitch\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.020115 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-ovn-kubernetes\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.020185 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-kubelet\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.020512 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-slash\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.021525 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-config\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.023171 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-env-overrides\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.023350 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-script-lib\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.025580 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovn-node-metrics-cert\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.057022 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerStarted","Data":"8e07aa7ac093288c50ec5ebaa61c802a8e9b99a2ce24bf3b77736b5858d1996d"} Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.059155 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerStarted","Data":"52a584f0fc2bef7d67fe533efe89336a7aa9ba06eca57a48c4b36f2868be82d0"} Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.067130 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782"} Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.067208 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f"} Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.067229 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"8b778378e2eb819a8fd8342abbd0fdb17b696c5d5d0232fb43ee0200f935bf84"} Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.067839 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr5bz\" (UniqueName: 
\"kubernetes.io/projected/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-kube-api-access-rr5bz\") pod \"ovnkube-node-g9vht\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.071084 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-wkg8g" event={"ID":"8f36b4b4-c850-44cc-b422-89ce6fe024cd","Type":"ContainerStarted","Data":"4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4"} Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.082484 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\
":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.110547 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.152649 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.193636 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.233718 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.270805 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.299824 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.311268 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: W0128 12:46:42.315964 4848 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda67a8b01_b8a6_4ca0_96fb_d5af26125a8d.slice/crio-d720d7463c7132c730a439c10bf953881337e050739bb4d9e5f90afc1f6ed34c WatchSource:0}: Error finding container d720d7463c7132c730a439c10bf953881337e050739bb4d9e5f90afc1f6ed34c: Status 404 returned error can't find the container with id d720d7463c7132c730a439c10bf953881337e050739bb4d9e5f90afc1f6ed34c Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.349299 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.391344 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.431352 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.476608 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\
\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.511210 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
6-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.551794 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.592746 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.634601 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.670930 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.711879 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.788172 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 14:58:02.357170257 +0000 UTC Jan 28 12:46:42 crc kubenswrapper[4848]: I0128 12:46:42.849357 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:42 crc kubenswrapper[4848]: E0128 12:46:42.849551 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.076664 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" exitCode=0 Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.076834 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.076875 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"d720d7463c7132c730a439c10bf953881337e050739bb4d9e5f90afc1f6ed34c"} Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.079683 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerStarted","Data":"c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade"} Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.081114 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerStarted","Data":"af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e"} Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.093165 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.104817 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.107394 4848 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.121087 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.141367 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.161602 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.182451 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.198219 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.212588 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.225720 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.237929 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.252106 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.264634 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.294451 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.308871 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.332160 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.332333 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.332326 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332411 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:46:47.332372811 +0000 UTC m=+34.244589869 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332448 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.332484 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332516 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:47.332494754 +0000 UTC m=+34.244711952 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.332558 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332793 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332836 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332849 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332893 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:47.332885245 +0000 UTC m=+34.245102283 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332940 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.332982 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:47.332971027 +0000 UTC m=+34.245188055 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.371615 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.409656 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.433627 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.433930 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.433983 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.434000 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.434093 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:47.434058614 +0000 UTC m=+34.346275812 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.449542 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.489637 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.538162 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.572760 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.609866 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.653925 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:4
6:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.692639 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.730583 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.775684 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\
"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.788659 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 18:59:23.45765995 +0000 UTC Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.849741 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.849917 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:43 crc kubenswrapper[4848]: I0128 12:46:43.850570 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:43 crc kubenswrapper[4848]: E0128 12:46:43.850649 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.036262 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-7pzvm"] Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.036814 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.040430 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.040664 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.041025 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.042769 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.063092 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.080007 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.086631 4848 generic.go:334] "Generic (PLEG): container finished" podID="fa3c7276-5cf5-47da-afc7-eb68e028f483" containerID="af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e" exitCode=0 Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.086682 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerDied","Data":"af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e"} Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.090880 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.093198 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0"} Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.102905 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.125702 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\
"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.141634 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.154611 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b5333926-d7c2-4039-84d7-b6b063baa185-serviceca\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.154664 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx99h\" (UniqueName: \"kubernetes.io/projected/b5333926-d7c2-4039-84d7-b6b063baa185-kube-api-access-qx99h\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.156635 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5333926-d7c2-4039-84d7-b6b063baa185-host\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.161677 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.175876 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.189914 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.210907 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.250589 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.257272 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5333926-d7c2-4039-84d7-b6b063baa185-host\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.257338 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b5333926-d7c2-4039-84d7-b6b063baa185-serviceca\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.257356 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx99h\" (UniqueName: \"kubernetes.io/projected/b5333926-d7c2-4039-84d7-b6b063baa185-kube-api-access-qx99h\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.257440 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5333926-d7c2-4039-84d7-b6b063baa185-host\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.258654 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b5333926-d7c2-4039-84d7-b6b063baa185-serviceca\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.296001 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx99h\" (UniqueName: \"kubernetes.io/projected/b5333926-d7c2-4039-84d7-b6b063baa185-kube-api-access-qx99h\") pod \"node-ca-7pzvm\" (UID: \"b5333926-d7c2-4039-84d7-b6b063baa185\") " pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.308142 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.332310 4848 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 28 12:46:44 crc kubenswrapper[4848]: W0128 12:46:44.334129 4848 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:46:44 crc kubenswrapper[4848]: W0128 12:46:44.334530 4848 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:46:44 crc kubenswrapper[4848]: W0128 12:46:44.335270 4848 reflector.go:484] object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: 
object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:46:44 crc kubenswrapper[4848]: W0128 12:46:44.335777 4848 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.359067 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.362846 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7pzvm" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.392328 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.437712 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.471750 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.508255 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.555806 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.593463 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.634447 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.672281 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.712555 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc 
kubenswrapper[4848]: I0128 12:46:44.749507 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.788879 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 07:51:36.491559996 +0000 UTC Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.793179 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.830469 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.849283 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:44 crc kubenswrapper[4848]: E0128 12:46:44.849782 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.870699 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\"
:{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.910565 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.955531 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:44 crc kubenswrapper[4848]: I0128 12:46:44.989389 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.031796 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.070212 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.098482 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerStarted","Data":"8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47"} Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.101230 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.101316 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.102592 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7pzvm" event={"ID":"b5333926-d7c2-4039-84d7-b6b063baa185","Type":"ContainerStarted","Data":"c38c868c409a0907b191b3100692a02a6783311a7ddd1a61bdadd5661ec19895"} Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.111183 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.159661 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.160606 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.211099 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.247492 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.290315 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.331088 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.378073 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.414177 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.453788 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.493771 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.535681 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc 
kubenswrapper[4848]: I0128 12:46:45.568691 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.582035 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.672572 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.700224 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.789645 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 06:29:04.789394851 +0000 UTC Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.849544 4848 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:45 crc kubenswrapper[4848]: E0128 12:46:45.849751 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:45 crc kubenswrapper[4848]: I0128 12:46:45.850165 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:45 crc kubenswrapper[4848]: E0128 12:46:45.850236 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.114559 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.114635 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.114657 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.119223 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7pzvm" event={"ID":"b5333926-d7c2-4039-84d7-b6b063baa185","Type":"ContainerStarted","Data":"c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.134320 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.151982 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.174374 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.201837 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.226791 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.242508 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.258476 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.274513 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.295042 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.308434 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.326949 4848 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.327140 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.329505 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.329591 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.329696 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.329845 4848 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.340955 4848 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.341320 4848 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.342639 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.342684 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.342701 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.342721 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.342733 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.347093 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.365278 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.366070 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.370910 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.370962 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.370972 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.370992 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.371004 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.382562 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.386235 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.390603 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.390640 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.390648 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.390663 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.390674 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.398713 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.405647 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.410964 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.411017 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.411034 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.411055 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.411069 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.415098 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.428207 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... identical image list elided; same entries and sizes as the first patch attempt above ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z"
Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.429672 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.433606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.433649 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.433661 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.433678 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.433694 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.451377 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... identical image list elided; same entries and sizes as the first patch attempt above ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z"
Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.451580 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.455500 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.455560 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.455571 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.455591 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.455604 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.460609 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.488293 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.505198 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.522450 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.554575 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84e
ca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.558824 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.558877 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.558888 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.558908 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.558922 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.592026 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.632355 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.662278 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.662319 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.662328 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.662347 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.662360 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.676004 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.712866 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.755715 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.768345 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.768408 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.768454 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.768479 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.768492 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.791680 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 19:42:14.572796047 +0000 UTC Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.797585 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:46Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.849143 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:46 crc kubenswrapper[4848]: E0128 12:46:46.849542 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.871328 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.871378 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.871388 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.871409 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.871422 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.975596 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.975667 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.975680 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.975700 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:46 crc kubenswrapper[4848]: I0128 12:46:46.975721 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:46Z","lastTransitionTime":"2026-01-28T12:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.080903 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.080959 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.080970 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.080991 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.081002 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.124917 4848 generic.go:334] "Generic (PLEG): container finished" podID="fa3c7276-5cf5-47da-afc7-eb68e028f483" containerID="8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47" exitCode=0 Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.125050 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerDied","Data":"8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.143631 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.158374 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.176826 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.183915 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.183963 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.183977 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.183998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.184012 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.190061 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.207353 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.220668 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.248010 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.269162 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.284188 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.286211 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.286239 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.286275 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.286291 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.286303 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.297791 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.315406 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.335041 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.353676 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.368892 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.389914 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.389998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.390012 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.390036 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.390467 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.395726 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.395878 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.395909 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.395951 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396054 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:46:55.396008491 +0000 UTC m=+42.308225679 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396085 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396104 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396156 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:55.396140845 +0000 UTC m=+42.308357883 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396189 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:55.396167415 +0000 UTC m=+42.308384453 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396107 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396219 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396236 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.396281 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:55.396274358 +0000 UTC m=+42.308491396 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.493861 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.494276 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.494406 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.494549 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.494632 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.497474 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.497733 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.497781 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.497796 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.497871 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:55.497849347 +0000 UTC m=+42.410066385 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.598155 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.598190 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.598198 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.598213 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.598224 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.701419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.701488 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.701496 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.701510 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.701522 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.792095 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 12:00:00.230767357 +0000 UTC Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.804483 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.804514 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.804523 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.804538 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.804549 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.849025 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.849155 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.849329 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:47 crc kubenswrapper[4848]: E0128 12:46:47.849430 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.907411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.907460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.907471 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.907489 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:47 crc kubenswrapper[4848]: I0128 12:46:47.907501 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:47Z","lastTransitionTime":"2026-01-28T12:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.010107 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.010190 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.010219 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.010280 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.010295 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.113732 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.113785 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.113798 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.113814 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.113828 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.132215 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerStarted","Data":"2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5"} Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.148157 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.164975 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.180219 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.191765 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.205516 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.217594 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.217645 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.217654 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.217671 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.217694 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.220896 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.240413 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.262358 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.279178 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.295026 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293
fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.310087 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.320332 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.320388 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.320400 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.320419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.320433 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.327655 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.341465 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.356436 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:48Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.422937 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.422986 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.422999 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.423017 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.423030 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.525921 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.526394 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.526423 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.526445 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.526456 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.629056 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.629097 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.629107 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.629122 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.629135 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.732186 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.732458 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.732472 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.732494 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.732516 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.792793 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 11:02:22.43069747 +0000 UTC
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.835054 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.835094 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.835106 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.835125 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.835139 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.849890 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:46:48 crc kubenswrapper[4848]: E0128 12:46:48.850094 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.940221 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.940289 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.940303 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.940324 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:48 crc kubenswrapper[4848]: I0128 12:46:48.940345 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:48Z","lastTransitionTime":"2026-01-28T12:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.043142 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.043187 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.043201 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.043224 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.043269 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.145460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.145511 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.145526 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.145544 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.145555 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.146412 4848 generic.go:334] "Generic (PLEG): container finished" podID="fa3c7276-5cf5-47da-afc7-eb68e028f483" containerID="2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5" exitCode=0
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.146481 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerDied","Data":"2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5"}
Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.164752 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.181695 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.202707 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.221086 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.235341 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.249229 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.249293 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.249305 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.249323 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.249368 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.252220 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.265635 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 
2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.281709 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.296224 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.310089 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.326965 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.336770 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.350992 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.352416 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.352447 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.352456 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.352471 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.352481 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.365071 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.455495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.455553 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.455568 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.455591 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.455603 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.558582 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.558629 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.558641 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.558660 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.558672 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.661712 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.661784 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.661795 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.661820 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.661833 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.764906 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.764960 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.764974 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.764996 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.765011 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.793322 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 14:01:16.960659921 +0000 UTC Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.848995 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:49 crc kubenswrapper[4848]: E0128 12:46:49.849163 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.849583 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:49 crc kubenswrapper[4848]: E0128 12:46:49.849728 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.867884 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.867923 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.867931 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.867946 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.867955 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.970921 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.970968 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.970982 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.971001 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:49 crc kubenswrapper[4848]: I0128 12:46:49.971014 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:49Z","lastTransitionTime":"2026-01-28T12:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.075068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.075128 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.075142 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.075164 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.075177 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.154327 4848 generic.go:334] "Generic (PLEG): container finished" podID="fa3c7276-5cf5-47da-afc7-eb68e028f483" containerID="813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244" exitCode=0 Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.154423 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerDied","Data":"813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.160538 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.176727 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not 
be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.179200 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.179403 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.179557 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.179585 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.179824 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.204558 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.240751 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.262263 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\
"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.284500 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.284565 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.284577 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.284600 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.284612 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.284487 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.301875 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.314856 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.329497 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.344926 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.360045 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.376128 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.390878 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.390915 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.390925 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.390943 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.390955 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.392422 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.409471 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.434019 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:50Z is after 2025-08-24T17:21:41Z"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.494396 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.494444 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.494517 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.494538 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.494552 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.597638 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.597682 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.597692 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.597712 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.597724 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.700314 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.700353 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.700369 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.700386 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.700397 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.793558 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 15:29:18.371343237 +0000 UTC
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.921034 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.921131 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.921148 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.921174 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.921185 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:50Z","lastTransitionTime":"2026-01-28T12:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:50 crc kubenswrapper[4848]: I0128 12:46:50.921860 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:46:50 crc kubenswrapper[4848]: E0128 12:46:50.922017 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.023936 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.023974 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.023986 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.024002 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.024014 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.127040 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.127081 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.127091 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.127107 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.127118 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.168629 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerStarted","Data":"663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667"}
Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.184095 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.196964 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.218393 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.230998 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.244117 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.244087 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.244177 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.244190 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.244212 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.244228 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.265978 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.281697 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.298332 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.312880 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.325229 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.339765 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.347601 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.347646 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.347656 4848 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.347674 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.347683 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.353496 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.376026 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-di
r\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.390631 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:51Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.450765 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.450810 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.450824 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.450843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.450857 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.552921 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.553543 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.553559 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.553578 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.553591 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.657119 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.657174 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.657185 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.657206 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.657220 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.760085 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.760127 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.760139 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.760157 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.760169 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.794764 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 11:33:55.350115438 +0000 UTC Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.849771 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:51 crc kubenswrapper[4848]: E0128 12:46:51.850168 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.850202 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:51 crc kubenswrapper[4848]: E0128 12:46:51.850609 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.850966 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.862569 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.862605 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.862614 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.862629 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.862640 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.965955 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.965994 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.966004 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.966020 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:51 crc kubenswrapper[4848]: I0128 12:46:51.966030 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:51Z","lastTransitionTime":"2026-01-28T12:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.069322 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.069375 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.069390 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.069447 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.069462 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.174138 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.174176 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.174189 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.174212 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.174228 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.276887 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.276938 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.276957 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.276975 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.276986 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.379600 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.379654 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.379667 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.379688 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.379702 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.483042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.483081 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.483092 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.483111 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.483122 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.587028 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.587068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.587078 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.587095 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.587105 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.690432 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.690485 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.690496 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.690512 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.690523 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.794331 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.794368 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.794379 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.794396 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.794405 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.795028 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-06 20:02:37.619254256 +0000 UTC Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.852359 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:52 crc kubenswrapper[4848]: E0128 12:46:52.852528 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.897117 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.897218 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.897230 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.897266 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.897278 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:52Z","lastTransitionTime":"2026-01-28T12:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.999941 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.999987 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:52 crc kubenswrapper[4848]: I0128 12:46:52.999999 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.000018 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.000029 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.102822 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.102848 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.102855 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.102869 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.102878 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.183228 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.186324 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.186923 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.191800 4848 generic.go:334] "Generic (PLEG): container finished" podID="fa3c7276-5cf5-47da-afc7-eb68e028f483" containerID="663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667" exitCode=0 Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.191858 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerDied","Data":"663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.198596 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.199501 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.199563 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.199577 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.199930 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293
fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.249109 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.249459 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.249471 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.249489 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.249499 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.259808 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.262988 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.267996 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.279886 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.297404 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.321093 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.333412 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.346476 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.355008 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.355042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.355052 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.355068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.355080 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.363410 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.377402 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.389526 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.419091 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.430681 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.449133 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.457811 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.457843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.457852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.457867 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.457877 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.463274 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.477338 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.493563 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.505622 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.515774 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.526372 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.536831 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.560024 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.560882 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.560928 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.560941 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc 
kubenswrapper[4848]: I0128 12:46:53.560961 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.560974 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.570826 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.582948 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293
fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.593526 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.610692 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.628155 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.641782 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.651228 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.663583 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.663624 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.663632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.663648 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.663658 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.766662 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.766720 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.766731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.766746 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.766757 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.795615 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 02:01:28.512542253 +0000 UTC Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.849215 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:53 crc kubenswrapper[4848]: E0128 12:46:53.849410 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.849481 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:53 crc kubenswrapper[4848]: E0128 12:46:53.849527 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.870064 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.870123 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.870136 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.870159 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.870174 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.917920 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz"] Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.918599 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.920478 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.921335 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.935446 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.951686 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.959681 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xp64\" (UniqueName: \"kubernetes.io/projected/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-kube-api-access-8xp64\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.959720 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.959754 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-env-overrides\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.959801 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.972739 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.972778 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.972788 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.972803 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.972812 4848 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:53Z","lastTransitionTime":"2026-01-28T12:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.973084 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:53 crc kubenswrapper[4848]: I0128 12:46:53.987718 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.001171 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.013624 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.033910 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.046195 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.061272 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-env-overrides\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.061374 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.061406 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xp64\" (UniqueName: \"kubernetes.io/projected/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-kube-api-access-8xp64\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.061453 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.062103 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-env-overrides\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.062331 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.062679 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.067809 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.075380 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.075999 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.076032 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.076044 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.076062 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.076076 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.077837 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xp64\" (UniqueName: \"kubernetes.io/projected/a8b8c4d9-ca19-4ac2-8c3e-bdc023238593-kube-api-access-8xp64\") pod \"ovnkube-control-plane-749d76644c-lsfjz\" (UID: \"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.090456 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-mult
us-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.104164 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.116044 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.128888 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.140434 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.179192 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.179263 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.179278 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.179300 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.179315 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.206535 4848 generic.go:334] "Generic (PLEG): container finished" podID="fa3c7276-5cf5-47da-afc7-eb68e028f483" containerID="d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de" exitCode=0 Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.206609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerDied","Data":"d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de"} Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.227619 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.233136 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.241357 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: W0128 12:46:54.247730 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8b8c4d9_ca19_4ac2_8c3e_bdc023238593.slice/crio-364889bf274683445f060602d57c4150c3caeb38d13de6295350f8007e666315 WatchSource:0}: Error finding container 364889bf274683445f060602d57c4150c3caeb38d13de6295350f8007e666315: Status 404 returned error can't find the container with id 364889bf274683445f060602d57c4150c3caeb38d13de6295350f8007e666315 Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.255852 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293
fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.278494 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.284998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.285032 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.285041 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.285061 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.285072 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.290725 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.305721 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.317311 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.333187 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.343103 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.355725 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.365552 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.378715 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.387407 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.387438 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.387448 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.387463 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.387472 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.390727 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.410326 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath
\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.426927 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.489932 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:54 crc 
kubenswrapper[4848]: I0128 12:46:54.490293 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.490373 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.490461 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.490533 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.593533 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.593576 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.593587 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.593604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.593616 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.696292 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.696335 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.696347 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.696368 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.696379 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.796368 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 12:52:35.850492271 +0000 UTC
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.798366 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.798411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.798428 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.798448 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.798462 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.850567 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:46:54 crc kubenswrapper[4848]: E0128 12:46:54.850741 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.866506 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.880653 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.895176 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.900800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.900852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.900867 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.900887 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.900929 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:54Z","lastTransitionTime":"2026-01-28T12:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.909551 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.948379 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.969193 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.988378 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-wqtnc"] Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.988940 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:54 crc kubenswrapper[4848]: E0128 12:46:54.989015 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:46:54 crc kubenswrapper[4848]: I0128 12:46:54.991003 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b0
8b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:54Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.003833 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.003881 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.003892 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.003911 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.003923 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.010798 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.025661 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.038299 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.056197 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84e
ca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.074191 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.075630 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.075718 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m624n\" (UniqueName: \"kubernetes.io/projected/8d447736-dd38-45b5-be15-2380dc55ad3d-kube-api-access-m624n\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.092033 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.104827 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e
9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.106404 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.106451 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.106460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.106479 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.106493 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.116231 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.129622 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.142851 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.163628 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b0
8b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.175813 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.176561 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.176599 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m624n\" (UniqueName: \"kubernetes.io/projected/8d447736-dd38-45b5-be15-2380dc55ad3d-kube-api-access-m624n\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.176805 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.176944 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. 
No retries permitted until 2026-01-28 12:46:55.676910227 +0000 UTC m=+42.589127465 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.191811 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountP
ath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.192998 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m624n\" (UniqueName: \"kubernetes.io/projected/8d447736-dd38-45b5-be15-2380dc55ad3d-kube-api-access-m624n\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.201926 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.209028 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.209078 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.209091 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.209108 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.209118 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.212749 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" event={"ID":"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593","Type":"ContainerStarted","Data":"364889bf274683445f060602d57c4150c3caeb38d13de6295350f8007e666315"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.215033 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.227944 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.240502 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.254176 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.267727 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.278553 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.292863 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.304930 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.312308 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.312395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.312407 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.312425 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.312436 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.317009 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.327216 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.415693 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.415738 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.415770 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.415788 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.415798 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.478832 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.478966 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:47:11.47893943 +0000 UTC m=+58.391156478 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.479005 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.479045 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.479075 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479163 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479189 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479221 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:11.479209087 +0000 UTC m=+58.391426125 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479236 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:11.479229188 +0000 UTC m=+58.391446226 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479379 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479416 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479447 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.479543 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:11.479517916 +0000 UTC m=+58.391735034 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.519215 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.519309 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.519326 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.519370 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.519382 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.580322 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.580711 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.580759 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.580775 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.580863 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:11.580836489 +0000 UTC m=+58.493053717 (durationBeforeRetry 16s).
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.622132 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.622190 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.622207 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.622229 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.622261 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.681641 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.681937 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.682081 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:46:56.682047138 +0000 UTC m=+43.594264216 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.731176 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.731235 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.731260 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.731280 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.731290 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.797505 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 15:49:56.042028535 +0000 UTC
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.834759 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.834802 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.834818 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.834839 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.834850 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.849163 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.849366 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.849505 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:46:55 crc kubenswrapper[4848]: E0128 12:46:55.849601 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.937747 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.937778 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.937786 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.937800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:55 crc kubenswrapper[4848]: I0128 12:46:55.937809 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:55Z","lastTransitionTime":"2026-01-28T12:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.041081 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.041135 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.041149 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.041172 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.041184 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.144152 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.144208 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.144219 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.144237 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.144263 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.218859 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" event={"ID":"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593","Type":"ContainerStarted","Data":"a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.272419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.272453 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.272464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.272480 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.272490 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.374703 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.374774 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.374786 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.374808 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.374822 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.478130 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.478183 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.478193 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.478216 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.478237 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.581295 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.581334 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.581344 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.581362 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.581376 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.637355 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.637395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.637405 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.637423 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.637436 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.648321 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:56Z is after 
2025-08-24T17:21:41Z" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.651992 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.652023 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.652032 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.652047 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.652060 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.663856 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:56Z is after 
2025-08-24T17:21:41Z" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.667228 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.667277 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.667289 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.667310 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.667323 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.680882 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:56Z is after 
2025-08-24T17:21:41Z" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.686052 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.686109 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.686127 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.686153 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.686174 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.704481 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.704832 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.705003 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:46:58.704972961 +0000 UTC m=+45.617190179 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.706809 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ ... status patch payload identical to the previous attempt, elided ... }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:56Z is after 
2025-08-24T17:21:41Z" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.711025 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.711075 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.711086 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.711106 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.711118 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.723268 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:56Z is after 
2025-08-24T17:21:41Z" Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.723393 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.725132 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.725165 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.725175 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.725194 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.725207 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.797825 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 04:38:25.634906169 +0000 UTC Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.829276 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.829374 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.829411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.829433 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.829450 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.849637 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.849730 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.849840 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:46:56 crc kubenswrapper[4848]: E0128 12:46:56.850006 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.932781 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.932833 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.932845 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.932865 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:56 crc kubenswrapper[4848]: I0128 12:46:56.932878 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:56Z","lastTransitionTime":"2026-01-28T12:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.035849 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.035896 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.035926 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.035942 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.035953 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.138204 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.138297 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.138309 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.138324 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.138337 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.224881 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" event={"ID":"fa3c7276-5cf5-47da-afc7-eb68e028f483","Type":"ContainerStarted","Data":"cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.240566 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.240607 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.240617 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.240633 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.240644 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.342526 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.342576 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.342590 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.342610 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.342623 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.449961 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.450031 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.450047 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.450072 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.450093 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.553053 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.553106 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.553115 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.553133 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.553146 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.655892 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.655925 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.655937 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.655952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.655962 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.759632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.759674 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.759685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.759702 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.759712 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.798557 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 21:11:11.526560686 +0000 UTC Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.849075 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:57 crc kubenswrapper[4848]: E0128 12:46:57.849325 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.849074 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:57 crc kubenswrapper[4848]: E0128 12:46:57.849557 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.863940 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.864029 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.864055 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.864090 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.864116 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.967773 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.967912 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.967930 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.967956 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:57 crc kubenswrapper[4848]: I0128 12:46:57.967969 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:57Z","lastTransitionTime":"2026-01-28T12:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.071167 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.071261 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.071282 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.071308 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.071325 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.174418 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.174482 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.174495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.174513 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.174525 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.245170 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.261081 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.277005 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.277738 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.277789 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.277806 4848 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.277828 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.277844 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.288029 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.297850 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.309634 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.321942 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.338170 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.349436 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.365500 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.380021 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
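
Note: every "Failed to update status for pod" entry above fails for the same underlying reason: the serving certificate behind the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, so the TLS handshake with https://127.0.0.1:9743 is rejected and every status patch bounces. A minimal Go sketch of the validity check that trips here (the certificate path and this standalone program are illustrative assumptions, not taken from the log):

// certcheck.go - sketch of the NotBefore/NotAfter comparison that the
// Go TLS stack performs during verification (path is hypothetical).
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/etc/webhook/serving-cert.pem") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM data found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	// A certificate is invalid when now is before NotBefore or after NotAfter;
	// the second case is what produces the error text seen in this log.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
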
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.380810 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.380864 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.380876 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.380895 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.380910 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.393014 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.409654 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.422002 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.436120 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.448802 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:58Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.484832 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.484887 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.484896 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.484913 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.484924 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.587604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.587670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.587688 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.587714 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.587732 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.690696 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.690736 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.690747 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.690764 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.690780 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.721691 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:58 crc kubenswrapper[4848]: E0128 12:46:58.721948 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:58 crc kubenswrapper[4848]: E0128 12:46:58.722049 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:47:02.722028814 +0000 UTC m=+49.634245852 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.793147 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.793218 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.793234 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.793277 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.793313 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.799586 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 16:04:51.170328655 +0000 UTC Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.849475 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.849543 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:46:58 crc kubenswrapper[4848]: E0128 12:46:58.849693 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:46:58 crc kubenswrapper[4848]: E0128 12:46:58.849822 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
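
Note: the "No retries permitted until ... (durationBeforeRetry 4s)" entry above reflects a per-volume exponential backoff: each failed MountVolume attempt roughly doubles the wait before the next try. A sketch of that doubling, assuming a 500ms initial delay and a two-minute cap (both values are assumptions; the log only shows the 4s step):

// backoff.go - sketch of a doubling retry delay consistent with the
// observed "durationBeforeRetry 4s" (initial delay and cap assumed).
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // assumed initial delay
	const maxDelay = 2 * time.Minute
	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("attempt %d: wait %s before retrying\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
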
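Note: the certificate_manager entry above shows a rotation deadline (2025-12-31) well ahead of the certificate's actual expiry (2026-02-24). One common way to derive such a deadline is to pick a jittered point late in the validity window, so a fleet of nodes does not rotate all at once; the 70-90% window below is an assumption for illustration, not a value read from the log:

// rotation.go - sketch of deriving a jittered rotation deadline from a
// certificate's validity window (jitter range is an assumption).
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	validity := notAfter.Sub(notBefore)
	// Uniformly random point in the last 70-90% of the validity window.
	jitter := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(jitter * float64(validity)))
}

func main() {
	notBefore := time.Date(2025, 11, 26, 5, 53, 3, 0, time.UTC) // illustrative issue time
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)   // expiry shown in the log
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}
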
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.895743 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.895787 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.895806 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.895825 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.895840 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.998840 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.998900 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.998912 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.998935 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:58 crc kubenswrapper[4848]: I0128 12:46:58.998958 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:58Z","lastTransitionTime":"2026-01-28T12:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.101618 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.101870 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.101879 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.101898 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.101908 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.209261 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.209304 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.209316 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.209334 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.209346 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
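
Note: the recurring NodeNotReady condition above persists until a CNI network configuration file appears in /etc/kubernetes/cni/net.d/; until then the runtime reports NetworkReady=false and pods needing pod networking cannot be sandboxed. A sketch of that readiness check as a standalone probe (the glob patterns and exit behavior are illustrative assumptions):

// cnicheck.go - sketch of the readiness condition implied by
// "no CNI configuration file in /etc/kubernetes/cni/net.d/".
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	var matches []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, _ := filepath.Glob(filepath.Join(confDir, pat))
		matches = append(matches, m...)
	}
	if len(matches) == 0 {
		fmt.Fprintln(os.Stderr, "NetworkReady=false: no CNI configuration file found")
		os.Exit(1)
	}
	fmt.Println("found CNI config:", matches[0])
}
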
Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.231777 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" event={"ID":"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593","Type":"ContainerStarted","Data":"edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.246735 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.261502 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.277722 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b0
8b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.288272 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 
12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.298831 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.312295 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.312342 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.312353 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.312371 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.312384 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.318357 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.336746 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.350193 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.361017 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.372763 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.383813 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.397897 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.410732 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.420550 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.420580 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.420590 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.420604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.420617 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.423415 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.435040 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.446033 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:46:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.522894 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.522940 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.522951 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.522969 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.522980 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.643784 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.643814 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.643823 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.643839 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.643848 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.746363 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.746435 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.746445 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.746459 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.746471 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.799951 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 13:47:35.541183123 +0000 UTC Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848441 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848479 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848498 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848518 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848531 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848899 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.848903 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:46:59 crc kubenswrapper[4848]: E0128 12:46:59.848994 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:46:59 crc kubenswrapper[4848]: E0128 12:46:59.849052 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.951400 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.951460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.951484 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.951513 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:46:59 crc kubenswrapper[4848]: I0128 12:46:59.951533 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:46:59Z","lastTransitionTime":"2026-01-28T12:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.054149 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.054206 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.054222 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.054268 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.054287 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.157333 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.157389 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.157402 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.157423 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.157439 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.237556 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/0.log" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.240594 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47" exitCode=1 Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.240657 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.242118 4848 scope.go:117] "RemoveContainer" containerID="9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.258897 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.261509 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.261560 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.261573 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.261596 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.261611 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.277588 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.305577 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.320295 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.338097 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.350682 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.363598 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.364773 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.364802 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.364812 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.364831 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.364843 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.383574 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run
/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.410346 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b0
8b601f229eead30b71bade47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 
12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.424857 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.439582 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.453940 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.466431 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.467584 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.467627 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.467643 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.467667 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.467684 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.480754 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.499595 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.513893 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.570655 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.570698 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.570707 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 
28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.570726 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.570736 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.674281 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.674618 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.674724 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.674835 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.674975 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.777833 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.777884 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.777894 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.777916 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.777956 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.800353 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 16:50:54.18970105 +0000 UTC Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.849621 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.849687 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:00 crc kubenswrapper[4848]: E0128 12:47:00.850219 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:00 crc kubenswrapper[4848]: E0128 12:47:00.850262 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.880646 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.880685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.880703 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.880729 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.880747 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.983671 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.983714 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.983726 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.983745 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:00 crc kubenswrapper[4848]: I0128 12:47:00.983764 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:00Z","lastTransitionTime":"2026-01-28T12:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.087952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.088018 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.088037 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.088066 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.088086 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.191743 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.191790 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.191804 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.191825 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.191842 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.247007 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/0.log" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.250474 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.251267 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.272717 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f185
03dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 
12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.286543 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.294831 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.295285 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.295425 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.295558 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.295680 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.302558 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
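Every "Failed to update status for pod" entry above fails for the same reason: the serving certificate of the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28T12:47:01Z, so each TLS handshake fails the x509 validity-window check before the status patch is ever applied. Below is a minimal Go sketch of that check, assuming a PEM-encoded certificate on disk; the /etc/webhook-cert/tls.crt path is a guess based on the webhook container's /etc/webhook-cert/ volume mount recorded later in this log, not a confirmed filename.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; the webhook container mounts /etc/webhook-cert/.
	data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// The same NotBefore/NotAfter window that crypto/x509 verification
	// enforces, and that produces the "certificate has expired or is not
	// yet valid" errors recorded in this log.
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.Format(time.RFC3339))
	}
}

Rotating or regenerating that certificate is the fix the errors point at; until then the kubelet keeps retrying the same patches, which is why the identical failure repeats for every pod below.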
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.341688 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.363671 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:4
6:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.380545 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.427574 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.427644 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.427662 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.427688 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.427705 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
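The NodeNotReady transitions interleaved with the webhook errors have a separate cause: the container runtime reports NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/. A minimal Go sketch of that directory scan, assuming the conventional *.conf, *.conflist, and *.json extensions that CNI config loaders look for (the extension list is an assumption, not something this log states):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Directory named in the KubeletNotReady message above.
	confDir := "/etc/kubernetes/cni/net.d"
	var found []string
	// Assumed extension set; libcni-style loaders scan for these.
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(confDir, pattern))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Printf("NetworkReady=false: no CNI configuration file in %s\n", confDir)
		return
	}
	fmt.Println("CNI configurations found:", found)
}

Once the network plugin writes its configuration into that directory, the Ready condition flips back on its own; no kubelet restart is implied by these entries.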
Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.437645 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.457032 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.479623 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.494674 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.508911 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.522055 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.530590 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.530647 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.530659 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.530680 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.530696 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.533273 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.542656 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.551416 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.562809 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:01Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.633858 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.633893 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.633901 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.633916 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.633925 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.738190 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.738273 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.738283 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.738301 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.738313 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.800825 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 01:38:49.510593627 +0000 UTC Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.847519 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.847566 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.847577 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.847598 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.847609 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.849493 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:01 crc kubenswrapper[4848]: E0128 12:47:01.849625 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.849989 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:01 crc kubenswrapper[4848]: E0128 12:47:01.850063 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.949990 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.950293 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.950400 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.950494 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:01 crc kubenswrapper[4848]: I0128 12:47:01.950587 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:01Z","lastTransitionTime":"2026-01-28T12:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.053627 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.053686 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.053699 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.053723 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.053736 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.156417 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.156696 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.156763 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.156832 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.156905 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.259477 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.259534 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.259548 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.259571 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.259583 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.362205 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.362275 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.362288 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.362308 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.362357 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.464695 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.464975 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.465068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.465160 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.465220 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.568451 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.568765 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.568902 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.569010 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.569123 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.671679 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.671722 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.671733 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.671751 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.671764 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.774771 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.774822 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.774837 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.774867 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.774943 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.779935 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:02 crc kubenswrapper[4848]: E0128 12:47:02.780079 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:47:02 crc kubenswrapper[4848]: E0128 12:47:02.780151 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:47:10.780128668 +0000 UTC m=+57.692345716 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.801729 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 15:56:55.435963178 +0000 UTC Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.849856 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.849946 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:02 crc kubenswrapper[4848]: E0128 12:47:02.850292 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:02 crc kubenswrapper[4848]: E0128 12:47:02.850399 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.878129 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.878438 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.878499 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.878534 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.878550 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.981158 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.981197 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.981208 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.981229 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:02 crc kubenswrapper[4848]: I0128 12:47:02.981243 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:02Z","lastTransitionTime":"2026-01-28T12:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.083983 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.084049 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.084064 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.084084 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.084098 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.187022 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.187071 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.187084 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.187108 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.187119 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.266325 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/1.log" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.267313 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/0.log" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.271212 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.271331 4848 scope.go:117] "RemoveContainer" containerID="9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.271129 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f" exitCode=1 Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.272139 4848 scope.go:117] "RemoveContainer" containerID="80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f" Jan 28 12:47:03 crc kubenswrapper[4848]: E0128 12:47:03.272353 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.286189 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.290034 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.290085 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.290100 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.290118 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.290135 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.297730 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.331814 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 
12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.343390 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.358237 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.369764 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.397646 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.397679 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.397691 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.397710 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.397721 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.400581 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.419731 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.442294 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.455570 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.468228 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.478998 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.494892 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.500268 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.500317 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.500331 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.500353 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.500372 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.508723 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.524065 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.534374 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.603369 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 
12:47:03.603411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.603421 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.603439 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.603451 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.706685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.706741 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.706758 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.706784 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.706809 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.730264 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.746682 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.746907 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.760287 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.774637 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.785904 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.797324 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.802791 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 11:13:40.882198321 +0000 UTC Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.809505 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.809578 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.809603 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.809638 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.809661 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.813297 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.829429 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.842392 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.849521 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:03 crc kubenswrapper[4848]: E0128 12:47:03.849709 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.849810 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:03 crc kubenswrapper[4848]: E0128 12:47:03.849897 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.860217 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.873822 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.894751 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f185
03dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 
controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.1
26.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.907370 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.912052 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.912089 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.912105 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.912126 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.912142 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:03Z","lastTransitionTime":"2026-01-28T12:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.921522 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.935860 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.949975 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:03 crc kubenswrapper[4848]: I0128 12:47:03.964130 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:03Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.015277 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.015329 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.015342 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.015363 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.015377 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.118482 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.118550 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.118562 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.118584 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.118601 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.221460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.221515 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.221533 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.221558 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.221575 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.277578 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/1.log" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.300201 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.322035 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27
753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.325201 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.325503 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.325718 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.325926 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.326127 4848 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.345998 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.366823 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.383467 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.399727 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.422784 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.429345 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.429397 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.429434 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.429454 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.429467 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.438120 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.466386 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 
12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.481020 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.497500 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.514833 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.528187 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.532371 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.532434 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.532449 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.532472 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.532488 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.544271 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.559639 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.572066 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.583959 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.593891 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.635644 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.635680 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.635690 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.635707 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.635718 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.738079 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.738126 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.738137 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.738155 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.738166 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.803714 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 22:25:46.624922396 +0000 UTC Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.841748 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.841800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.841810 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.841828 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.841840 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.849084 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.849110 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:04 crc kubenswrapper[4848]: E0128 12:47:04.849231 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:04 crc kubenswrapper[4848]: E0128 12:47:04.849380 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.863640 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.875831 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.890238 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.902793 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.915527 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.932469 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.944101 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.944159 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.944171 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.944195 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.944222 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:04Z","lastTransitionTime":"2026-01-28T12:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.945615 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.957689 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.971605 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.981268 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:04 crc kubenswrapper[4848]: I0128 12:47:04.997919 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} 
selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:04Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.010954 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:05Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.022088 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:05Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.036084 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T12:47:05Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.046744 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:05Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.047395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.047440 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.047448 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.047465 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 
12:47:05.047476 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.057086 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:05Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.068407 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:05Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.149383 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.149489 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.149505 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.149522 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.149534 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.252923 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.253010 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.253031 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.253469 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.253541 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.357391 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.357926 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.357940 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.357962 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.357978 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.461736 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.461789 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.461802 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.461817 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.461831 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.565485 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.565891 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.566011 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.566148 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.566302 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.669055 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.669098 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.669108 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.669123 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.669134 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.772130 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.772193 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.772203 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.772220 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.772231 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.804396 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 00:40:55.210940416 +0000 UTC Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.849541 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.849632 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:05 crc kubenswrapper[4848]: E0128 12:47:05.849749 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:05 crc kubenswrapper[4848]: E0128 12:47:05.849861 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.874937 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.874973 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.874982 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.874998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.875009 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.978702 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.979017 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.979189 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.979387 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:05 crc kubenswrapper[4848]: I0128 12:47:05.979522 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:05Z","lastTransitionTime":"2026-01-28T12:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.081891 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.082236 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.082364 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.082455 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.082549 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.186345 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.186398 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.186414 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.186438 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.186457 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.288731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.288779 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.288789 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.288807 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.288819 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.393350 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.393403 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.393418 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.393442 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.393462 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.496049 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.496100 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.496113 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.496130 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.496141 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.599223 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.599278 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.599287 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.599305 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.599314 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.702318 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.702363 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.702373 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.702393 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.702403 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.805314 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 20:00:27.118065387 +0000 UTC Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.805998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.806045 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.806059 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.806081 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.806095 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.856957 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.856970 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:06 crc kubenswrapper[4848]: E0128 12:47:06.857148 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:06 crc kubenswrapper[4848]: E0128 12:47:06.857259 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.909395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.909443 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.909454 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.909474 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.909486 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.917449 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.917492 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.917504 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.917521 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.917535 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: E0128 12:47:06.931952 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:06Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.935681 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.935731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.935747 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.935770 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.935787 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: E0128 12:47:06.949348 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:06Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.953234 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.953283 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
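The err payload in the failure above is a strategic merge patch against the Node's status subresource; the $setElementOrder/conditions directive pins the ordering of the merged conditions list. As a minimal sketch of that payload shape (values abbreviated from the log; the marshalling below is illustrative, not the kubelet's actual code path):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same structure as the patch in the log, reduced to the Ready condition.
	patch := map[string]any{
		"status": map[string]any{
			// Strategic-merge-patch directive: keep merged conditions in this order.
			"$setElementOrder/conditions": []map[string]string{
				{"type": "MemoryPressure"},
				{"type": "DiskPressure"},
				{"type": "PIDPressure"},
				{"type": "Ready"},
			},
			"conditions": []map[string]string{
				{"type": "Ready", "status": "False", "reason": "KubeletNotReady"},
			},
		},
	}
	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// The kubelet sends a body like this as PATCH .../nodes/crc/status with
	// Content-Type application/strategic-merge-patch+json; that request is
	// what the admission webhook rejects in the log above.
	fmt.Println(string(b))
}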
event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.953296 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.953316 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.953327 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: E0128 12:47:06.963676 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:06Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.967205 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.967308 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.967334 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.967367 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.967391 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:06 crc kubenswrapper[4848]: E0128 12:47:06.983969 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:06Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.988389 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.988419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
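Every retry dies at the same TLS handshake: the webhook's serving certificate expired on 2025-08-24T17:21:41Z, long before the node's current time of 2026-01-28T12:47:06Z. A minimal sketch of the validity-window check behind the "x509: certificate has expired or is not yet valid" error (the PEM path is hypothetical; on a real node the certificate would be presented by the listener on 127.0.0.1:9743):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path, for illustration only.
	pemBytes, err := os.ReadFile("/tmp/webhook-serving-cert.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	// The same window comparison the TLS handshake applies: in the log,
	// 2026-01-28T12:47:06Z falls after NotAfter (2025-08-24T17:21:41Z).
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate not yet valid until %s\n", cert.NotBefore.UTC())
	case now.After(cert.NotAfter):
		fmt.Printf("certificate expired at %s\n", cert.NotAfter.UTC())
	default:
		fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC())
	}
}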
event="NodeHasNoDiskPressure" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.988427 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.988442 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:06 crc kubenswrapper[4848]: I0128 12:47:06.988452 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:06Z","lastTransitionTime":"2026-01-28T12:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:07 crc kubenswrapper[4848]: E0128 12:47:07.000799 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:06Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:07 crc kubenswrapper[4848]: E0128 12:47:07.000919 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.011353 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.011448 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.011464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.011508 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.011524 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.115030 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.115080 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.115092 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.115113 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.115122 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.219857 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.219892 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.219901 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.219915 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.219925 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.322523 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.322583 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.322594 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.322614 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.322627 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.426142 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.426196 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.426209 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.426230 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.426276 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.529838 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.529919 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.529943 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.529977 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.530004 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.633393 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.633496 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.633506 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.633522 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.633532 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.736493 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.736560 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.736573 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.736589 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.736600 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.806326 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 20:25:59.131798667 +0000 UTC
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.839007 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.839071 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.839081 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.839105 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.839116 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.849369 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:47:07 crc kubenswrapper[4848]: E0128 12:47:07.849584 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.849679 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:47:07 crc kubenswrapper[4848]: E0128 12:47:07.849888 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
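
Every NotReady heartbeat and every skipped pod sync in this stretch repeats the same readiness verdict from the container runtime: NetworkReady=false because nothing has yet written a CNI network configuration into /etc/kubernetes/cni/net.d/ (the cluster network operator's job on this node). A rough Go sketch of such a directory probe, assuming the conventional .conf/.conflist/.json extensions; this is an illustration, not the CRI-O/ocicni code path:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // hasCNIConfig reports whether dir contains at least one CNI network
    // configuration file. The extension list mirrors what CNI loaders
    // conventionally accept; treat this as a sketch of the readiness
    // check, not the runtime's actual implementation.
    func hasCNIConfig(dir string) (bool, error) {
    	entries, err := os.ReadDir(dir)
    	if err != nil {
    		return false, err
    	}
    	for _, e := range entries {
    		if e.IsDir() {
    			continue
    		}
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			return true, nil
    		}
    	}
    	return false, nil
    }

    func main() {
    	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
    	if err != nil || !ok {
    		// This is the state the kubelet keeps reporting above.
    		fmt.Println("network plugin not ready: no CNI configuration file")
    		return
    	}
    	fmt.Println("CNI configuration present")
    }
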
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.942431 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.942500 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.942510 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.942528 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:07 crc kubenswrapper[4848]: I0128 12:47:07.942541 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:07Z","lastTransitionTime":"2026-01-28T12:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.045555 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.045601 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.045610 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.045626 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.045637 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.147758 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.147799 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.147809 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.147826 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.147837 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.251384 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.251448 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.251461 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.251482 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.251496 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.354995 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.355042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.355051 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.355066 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.355077 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.458186 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.458444 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.458472 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.458490 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.458500 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.560924 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.561160 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.561169 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.561186 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.561198 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.664172 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.664234 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.664261 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.664329 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.664344 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.766275 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.766307 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.766318 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.766333 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.766343 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.807172 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 04:24:27.154312942 +0000 UTC
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.849467 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.849532 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:47:08 crc kubenswrapper[4848]: E0128 12:47:08.849685 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:47:08 crc kubenswrapper[4848]: E0128 12:47:08.849841 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d"
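
Note how the kubelet-serving rotation deadline changes on every attempt (2025-11-22 above, 2025-11-08 here, later 2025-12-07 and 2026-01-16) even though the certificate itself is fixed: each computed deadline already lies in the past relative to the node clock, so the certificate manager re-rolls it on every pass. client-go derives the deadline from a jittered fraction of the certificate's validity window; the sketch below assumes the commonly cited 70-90% interval, so treat the constants as an assumption rather than the upstream source:

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline mimics client-go's certificate manager: pick a
    // deadline at a random point roughly 70-90% of the way through the
    // certificate's validity. Assumed interval; see
    // k8s.io/client-go/util/certificate for the authoritative constants.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	notAfter, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-02-24 05:53:03 +0000 UTC")
    	// Issuance time is not in the log; assume a one-year certificate.
    	notBefore := notAfter.AddDate(-1, 0, 0)
    	for i := 0; i < 3; i++ {
    		// An already-passed deadline gets re-rolled, which is why the
    		// logged deadline jumps around between heartbeats.
    		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
    	}
    }
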
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.869385 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.869437 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.869453 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.869477 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.869495 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.971963 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.972019 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.972028 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.972046 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:08 crc kubenswrapper[4848]: I0128 12:47:08.972057 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:08Z","lastTransitionTime":"2026-01-28T12:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.075272 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.075340 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.075363 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.075392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.075415 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.178827 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.178874 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.178886 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.178902 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.178913 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.281300 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.281355 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.281374 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.281396 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.281413 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.384135 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.384180 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.384191 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.384210 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.384222 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.487394 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.487490 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.487517 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.487552 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.487580 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.590643 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.590715 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.590728 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.590750 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.590764 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.694534 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.694606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.694624 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.694651 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.694670 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.798088 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.798858 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.798878 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.798898 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.798911 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.807372 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 11:24:55.925849862 +0000 UTC Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.849471 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.849562 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:09 crc kubenswrapper[4848]: E0128 12:47:09.849647 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:09 crc kubenswrapper[4848]: E0128 12:47:09.849763 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.901301 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.901345 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.901358 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.901377 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:09 crc kubenswrapper[4848]: I0128 12:47:09.901394 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:09Z","lastTransitionTime":"2026-01-28T12:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.003967 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.004014 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.004028 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.004056 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.004070 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.107071 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.107407 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.107475 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.107576 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.107651 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.210732 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.210773 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.210796 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.210813 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.210823 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.314301 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.314369 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.314385 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.314408 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.314423 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.419364 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.419413 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.419427 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.419446 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.419459 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.522833 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.522872 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.522881 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.522901 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.522911 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.626349 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.626405 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.626419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.626437 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.626451 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.729631 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.729691 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.729708 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.729731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.729747 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.791864 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:10 crc kubenswrapper[4848]: E0128 12:47:10.792127 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:47:10 crc kubenswrapper[4848]: E0128 12:47:10.792278 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:47:26.79222645 +0000 UTC m=+73.704443688 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.808464 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 02:29:36.68031946 +0000 UTC
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.833340 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.833386 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.833411 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.833435 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.833450 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:10Z","lastTransitionTime":"2026-01-28T12:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.849861 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:47:10 crc kubenswrapper[4848]: I0128 12:47:10.849880 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:47:10 crc kubenswrapper[4848]: E0128 12:47:10.850091 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d"
Jan 28 12:47:10 crc kubenswrapper[4848]: E0128 12:47:10.850217 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
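
The metrics-certs mount above was parked with durationBeforeRetry 16s, and the nginx-conf mount further below gets 32s: per-volume operations back off exponentially instead of retrying hot while the referenced secret or configmap stays unregistered. A toy Go model of that doubling schedule follows; the initial duration and cap are assumptions, not the exact constants of the kubelet's nestedpendingoperations:

    package main

    import (
    	"fmt"
    	"time"
    )

    // expBackoff doubles the wait after every failure, up to a cap.
    // The 16s -> 32s progression matches the two durationBeforeRetry
    // values in this log; initial value and cap here are illustrative.
    type expBackoff struct {
    	next, cap time.Duration
    }

    // fail records a failure and returns how long to wait before the
    // next attempt, doubling the stored duration up to the cap.
    func (b *expBackoff) fail() time.Duration {
    	d := b.next
    	if b.next *= 2; b.next > b.cap {
    		b.next = b.cap
    	}
    	return d
    }

    func main() {
    	b := &expBackoff{next: 500 * time.Millisecond, cap: 2 * time.Minute}
    	for i := 0; i < 7; i++ {
    		// Walks 500ms, 1s, 2s, 4s, 8s, 16s, 32s -- passing through
    		// the two retry delays recorded above and below.
    		fmt.Printf("retry %d: no retries permitted for %v\n", i+1, b.fail())
    	}
    }
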
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.014566 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.014928 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.015085 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.015205 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.015362 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.118339 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.118613 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.118740 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.118848 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.118960 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.222631 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.222672 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.222684 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.222704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.222717 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.326401 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.326443 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.326453 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.326470 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.326480 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.429068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.429126 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.429143 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.429164 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.429179 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.499163 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.499299 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.499334 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.499359 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499499 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499566 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:43.499546341 +0000 UTC m=+90.411763389 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499614 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499703 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499752 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499705 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:47:43.499670694 +0000 UTC m=+90.411887732 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499768 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499803 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:43.499774907 +0000 UTC m=+90.411991945 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.499832 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:43.499823078 +0000 UTC m=+90.412040296 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.532357 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.532421 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.532431 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.532449 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.532461 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.600545 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.600761 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.600787 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.600801 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.600873 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:47:43.600853052 +0000 UTC m=+90.513070100 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.635185 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.635349 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.635367 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.635392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.635405 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.739031 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.739096 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.739108 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.739148 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.739161 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.809211 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 15:22:49.766195396 +0000 UTC Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.841695 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.841733 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.841743 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.841772 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.841786 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.849151 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.849196 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.849314 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:11 crc kubenswrapper[4848]: E0128 12:47:11.849435 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.943864 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.943934 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.943952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.943974 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:11 crc kubenswrapper[4848]: I0128 12:47:11.943988 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:11Z","lastTransitionTime":"2026-01-28T12:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.046679 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.046757 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.046767 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.046785 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.046795 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.149473 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.149525 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.149537 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.149554 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.149569 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.252486 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.252531 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.252542 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.252559 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.252569 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.355935 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.355983 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.355992 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.356009 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.356024 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.459517 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.459567 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.459601 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.459628 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.459646 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.562087 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.562312 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.562326 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.562347 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.562361 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.665035 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.665090 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.665105 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.665127 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.665139 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.767838 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.767914 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.767934 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.767961 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.767980 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.809719 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 19:22:35.220749402 +0000 UTC Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.849669 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:12 crc kubenswrapper[4848]: E0128 12:47:12.849923 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.849679 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:12 crc kubenswrapper[4848]: E0128 12:47:12.850376 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.871312 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.871392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.871402 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.871425 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.871439 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.974440 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.974495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.974508 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.974526 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:12 crc kubenswrapper[4848]: I0128 12:47:12.974539 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:12Z","lastTransitionTime":"2026-01-28T12:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.077606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.077684 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.077700 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.077722 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.077736 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.181065 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.181123 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.181136 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.181164 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.181195 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.288450 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.288736 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.288834 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.288925 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.289011 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.391776 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.392630 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.392717 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.392797 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.392878 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.496446 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.496513 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.496528 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.496553 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.496569 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.599351 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.599417 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.599436 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.599461 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.599480 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.702545 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.702902 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.702982 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.703078 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.703163 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.805657 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.805695 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.805704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.805734 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.805747 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.810309 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 10:11:33.715993059 +0000 UTC Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.849162 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.849296 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:13 crc kubenswrapper[4848]: E0128 12:47:13.849616 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:13 crc kubenswrapper[4848]: E0128 12:47:13.849814 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.909048 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.909116 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.909134 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.909161 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:13 crc kubenswrapper[4848]: I0128 12:47:13.909196 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:13Z","lastTransitionTime":"2026-01-28T12:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.012208 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.012275 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.012287 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.012307 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.012319 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.115668 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.115743 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.115755 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.115773 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.115788 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.219476 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.219549 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.219566 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.219594 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.219613 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.322533 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.322593 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.322609 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.322634 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.322652 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.425306 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.425925 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.425992 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.426063 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.426135 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.529498 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.529763 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.529856 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.529933 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.530004 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.631896 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.631935 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.631949 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.631966 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.631977 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.734908 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.734954 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.734967 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.734985 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.734997 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.810568 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 16:51:41.497991144 +0000 UTC
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.838200 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.838643 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.838801 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.838955 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.839074 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.849710 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.849892 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:47:14 crc kubenswrapper[4848]: E0128 12:47:14.850119 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d"
Jan 28 12:47:14 crc kubenswrapper[4848]: E0128 12:47:14.850232 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.850536 4848 scope.go:117] "RemoveContainer" containerID="80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.865537 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.878013 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.891364 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.904806 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.916532 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.930411 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.941998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.942033 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.942042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.942061 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.942073 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:14Z","lastTransitionTime":"2026-01-28T12:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.947207 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.972726 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ccea300d0c503aef1231c96ff9e5f21afa409b08b601f229eead30b71bade47\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:00Z\\\",\\\"message\\\":\\\"6:59.998278 6087 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 12:46:59.998312 6087 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 12:46:59.999525 6087 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 12:46:59.999548 6087 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 12:46:59.999565 6087 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 12:46:59.999592 6087 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 12:46:59.999639 6087 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 12:46:59.999673 6087 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 12:46:59.999680 6087 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 12:46:59.999713 6087 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 12:46:59.999729 6087 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 12:46:59.999739 6087 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 12:46:59.999756 6087 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 12:46:59.999768 6087 factory.go:656] Stopping watch factory\\\\nI0128 12:46:59.999772 6087 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 12:46:59.999786 6087 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:46:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:14 crc kubenswrapper[4848]: I0128 12:47:14.989733 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:14Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.003184 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.014803 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.028214 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.040518 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.043879 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.043915 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.043926 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.043941 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.043952 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.053042 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.072723 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.086168 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.101211 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.116673 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.133231 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.146201 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.146239 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.146268 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.146290 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.146303 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.156420 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.168632 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.182088 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.198943 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.215040 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.228457 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.248839 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.248909 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.248926 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.248953 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.248975 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.252857 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.264942 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.277048 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.295608 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.314313 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.321410 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/1.log" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.326103 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.326616 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.329649 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.345619 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.351546 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.351592 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.351604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.351621 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.351634 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.364864 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.393708 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.413656 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.436922 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.454716 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.454747 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.454757 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.454774 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.454785 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.477210 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.492736 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.508472 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.524951 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/o
s-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.536038 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.549862 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.560827 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.560880 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.560890 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.560912 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.560927 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.568058 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.582811 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.597978 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.613808 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.630185 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.643088 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.658796 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.663962 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.664002 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.664012 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.664029 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.664042 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.677280 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.693926 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.768215 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.768331 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.768349 4848 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.768729 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.769050 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.811006 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 20:29:53.531475558 +0000 UTC Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.849990 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.850088 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:15 crc kubenswrapper[4848]: E0128 12:47:15.850159 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:15 crc kubenswrapper[4848]: E0128 12:47:15.850321 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.871565 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.871607 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.871619 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.871636 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.871651 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.974984 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.975047 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.975061 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.975085 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:15 crc kubenswrapper[4848]: I0128 12:47:15.975112 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:15Z","lastTransitionTime":"2026-01-28T12:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.078650 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.078716 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.078725 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.078757 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.078769 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.181745 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.181841 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.181852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.181868 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.181879 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.284554 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.284604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.284616 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.284635 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.284649 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.332646 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/2.log" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.333566 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/1.log" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.337825 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0" exitCode=1 Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.337901 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.338036 4848 scope.go:117] "RemoveContainer" containerID="80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.338715 4848 scope.go:117] "RemoveContainer" containerID="1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0" Jan 28 12:47:16 crc kubenswrapper[4848]: E0128 12:47:16.338908 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.359870 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.378746 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.388037 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.388095 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.388113 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.388135 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.388148 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.397067 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.411763 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.425982 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.442036 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.457031 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.482207 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a79
3a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d2d743d4f53043f1bff56def3a0fc02305f18503dfe6290542095504008c3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:02Z\\\",\\\"message\\\":\\\"ighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.261015 6344 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler/scheduler]} name:Service_openshift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:02.260974 6344 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0128 12:47:02.261148 6344 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0128 12:47:02.261184 6344 ovnkube.go:599] Stopped ovnkube\\\\nI0128 12:47:02.261211 6344 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0128 12:47:02.261352 6344 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook 
\\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.490992 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.491028 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.491037 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.491053 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.491064 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.498057 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.514533 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.532021 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.547940 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.563339 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.580479 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.593162 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"
podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.595167 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.595230 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.595260 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.595283 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.595297 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.610165 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.624375 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:16Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.697905 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.697947 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.697958 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.697973 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.697983 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.801193 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.801274 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.801289 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.801310 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.801326 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.811882 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 00:36:35.609703382 +0000 UTC Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.849503 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.849607 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:16 crc kubenswrapper[4848]: E0128 12:47:16.849663 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:16 crc kubenswrapper[4848]: E0128 12:47:16.849686 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.903759 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.903797 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.903813 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.903832 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:16 crc kubenswrapper[4848]: I0128 12:47:16.903844 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:16Z","lastTransitionTime":"2026-01-28T12:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.007049 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.007106 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.007120 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.007141 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.007156 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.110323 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.110376 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.110392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.110412 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.110424 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.213017 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.213077 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.213091 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.213110 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.213126 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.305862 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.305910 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.305921 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.305938 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.305951 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.320279 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.328699 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.328785 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.328800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.328824 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.328839 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.344312 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.346470 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/2.log" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.350430 4848 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.350481 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.350493 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.350514 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.350527 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.354340 4848 scope.go:117] "RemoveContainer" containerID="1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.354534 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.364926 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.368393 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.370840 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.370874 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.370889 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.370911 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.370926 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.383827 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.384732 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.389340 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.389388 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.389398 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.389417 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.389428 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.401389 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.403428 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e
0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.403593 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.405897 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.405938 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.405952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.405971 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.405982 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.415348 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.430614 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.447827 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.462785 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.478443 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.499344 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.509694 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.509729 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.509740 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.509757 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.509770 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.515070 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.533388 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.549678 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.566197 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.579633 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.594862 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.612680 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.612731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.612742 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.612772 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.612785 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.619341 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.636466 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.716068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.716127 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.716139 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.716158 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.716173 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.812472 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 04:28:21.24653049 +0000 UTC Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.819658 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.819686 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.819695 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.819716 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.819737 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.849292 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.849352 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.849454 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:17 crc kubenswrapper[4848]: E0128 12:47:17.849621 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.922652 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.922713 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.922726 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.922749 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:17 crc kubenswrapper[4848]: I0128 12:47:17.922765 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:17Z","lastTransitionTime":"2026-01-28T12:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.026316 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.026851 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.026869 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.026889 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.026903 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.129931 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.129968 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.129978 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.129997 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.130010 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.232660 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.232708 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.232718 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.232738 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.232751 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.339716 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.339776 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.339786 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.339804 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.339821 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.442727 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.442800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.442813 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.442835 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.442848 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.545549 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.545593 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.545604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.545621 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.545632 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.648907 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.649554 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.649575 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.649599 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.649613 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.805363 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.805414 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.805425 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.805442 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.805452 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.813438 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 12:13:48.528346984 +0000 UTC Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.849432 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.849472 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:18 crc kubenswrapper[4848]: E0128 12:47:18.849624 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:18 crc kubenswrapper[4848]: E0128 12:47:18.849732 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.908660 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.908717 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.908733 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.908751 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:18 crc kubenswrapper[4848]: I0128 12:47:18.908765 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:18Z","lastTransitionTime":"2026-01-28T12:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.011756 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.011810 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.011822 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.011843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.011854 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.114481 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.114540 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.114550 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.114566 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.114577 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.220733 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.220804 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.220820 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.220843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.220856 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.324071 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.324133 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.324145 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.324169 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.324179 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.426588 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.426632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.426641 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.426660 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.426669 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.546582 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.546638 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.546650 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.546669 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.546684 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.649656 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.649708 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.649720 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.649741 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.649755 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.752460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.752523 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.752546 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.752579 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.752603 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.814371 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 02:15:06.730336562 +0000 UTC Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.849328 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.849447 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:19 crc kubenswrapper[4848]: E0128 12:47:19.849536 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:19 crc kubenswrapper[4848]: E0128 12:47:19.849678 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.855572 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.855620 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.855633 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.855656 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.855671 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.958612 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.958701 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.958727 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.958752 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:19 crc kubenswrapper[4848]: I0128 12:47:19.958768 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:19Z","lastTransitionTime":"2026-01-28T12:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.086688 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.086736 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.086745 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.086765 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.086776 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.190281 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.190322 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.190334 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.190349 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.190360 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.294179 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.294230 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.294261 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.294281 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.294293 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.396447 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.396496 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.396507 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.396524 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.396535 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.557905 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.558755 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.558938 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.559046 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.559150 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.663089 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.663755 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.665823 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.666077 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.666359 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.769335 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.769606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.769688 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.769760 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.769823 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.814705 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 18:16:18.805384332 +0000 UTC Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.850531 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:20 crc kubenswrapper[4848]: E0128 12:47:20.850725 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.851237 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:20 crc kubenswrapper[4848]: E0128 12:47:20.851316 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.878865 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.879350 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.879457 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.879555 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.879629 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.981966 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.982036 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.982049 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.982067 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:20 crc kubenswrapper[4848]: I0128 12:47:20.982079 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:20Z","lastTransitionTime":"2026-01-28T12:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.085928 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.085992 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.086010 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.086033 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.086046 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.188805 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.188871 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.188889 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.188916 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.188941 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.292810 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.292843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.292852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.292889 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.292901 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.396240 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.396317 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.396326 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.396344 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.396354 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.499550 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.499604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.499615 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.499632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.499643 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.602424 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.602460 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.602469 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.602486 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.602497 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.706165 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.706215 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.706227 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.706266 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.706280 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.809616 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.809664 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.809673 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.809691 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.809702 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.816038 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 01:51:37.319770093 +0000 UTC Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.849613 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:21 crc kubenswrapper[4848]: E0128 12:47:21.849783 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.849999 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:21 crc kubenswrapper[4848]: E0128 12:47:21.850068 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.912412 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.912658 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.912670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.912691 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:21 crc kubenswrapper[4848]: I0128 12:47:21.912704 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:21Z","lastTransitionTime":"2026-01-28T12:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.015985 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.016455 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.016871 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.017127 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.017269 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
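Every NetworkReady=false message above points at one root cause: nothing has yet written a network config into /etc/kubernetes/cni/net.d/ (on an OpenShift/CRC node that file normally appears once the cluster network plugin, e.g. OVN-Kubernetes/Multus, starts). Below is a stdlib-only diagnostic sketch of the check an operator might run by hand; it is not the kubelet's own loading code, and the extension list mirrors what CNI's libcni conventionally accepts.

  package main

  import (
      "fmt"
      "os"
      "path/filepath"
  )

  func main() {
      // Directory named in every NetworkReady error above.
      dir := "/etc/kubernetes/cni/net.d"
      entries, err := os.ReadDir(dir)
      if err != nil {
          fmt.Println("cannot read", dir, "->", err)
          return
      }
      found := false
      for _, e := range entries {
          // libcni conventionally loads *.conf, *.conflist and *.json.
          switch filepath.Ext(e.Name()) {
          case ".conf", ".conflist", ".json":
              fmt.Println("CNI config present:", filepath.Join(dir, e.Name()))
              found = true
          }
      }
      if !found {
          fmt.Println("no CNI configuration file in", dir, "- NetworkReady stays false")
      }
  }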
Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.119767 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.120103 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.120200 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.120302 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.120400 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.224133 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.224469 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.224564 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.224644 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.224709 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.327867 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.327911 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.327921 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.327937 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.327947 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.431417 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.431464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.431475 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.431495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.431508 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.534202 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.534552 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.534632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.534719 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.534805 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.637703 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.638434 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.638505 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.638532 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.638549 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.741834 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.741893 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.741907 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.741929 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.741944 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.816855 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 20:36:31.431695334 +0000 UTC Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.845605 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.845670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.845681 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.845704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.845717 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.850653 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.850771 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:22 crc kubenswrapper[4848]: E0128 12:47:22.850810 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:22 crc kubenswrapper[4848]: E0128 12:47:22.851019 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.948284 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.948350 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.948370 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.948398 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:22 crc kubenswrapper[4848]: I0128 12:47:22.948416 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:22Z","lastTransitionTime":"2026-01-28T12:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.051703 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.051759 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.051772 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.051793 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.051809 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.155294 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.155350 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.155364 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.155384 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.155398 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.258476 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.258518 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.258528 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.258544 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.258556 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.362037 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.362090 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.362099 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.362118 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.362131 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.464610 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.464657 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.464667 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.464688 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.464697 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.567082 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.567421 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.567509 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.567599 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.567669 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.670631 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.670672 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.670681 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.670696 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.670708 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.774616 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.775102 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.775467 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.775728 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.775968 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.818044 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 01:35:25.952437272 +0000 UTC Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.849136 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:23 crc kubenswrapper[4848]: E0128 12:47:23.849346 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.849672 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:23 crc kubenswrapper[4848]: E0128 12:47:23.849750 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.879225 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.879292 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.879311 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.879333 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.879349 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.982224 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.982506 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.982625 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.982704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:23 crc kubenswrapper[4848]: I0128 12:47:23.982782 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:23Z","lastTransitionTime":"2026-01-28T12:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.086016 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.086093 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.086118 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.086155 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.086180 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.191972 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.192011 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.192026 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.192045 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.192059 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.294349 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.294389 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.294401 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.294419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.294433 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.396659 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.396719 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.396731 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.396752 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.396763 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.499833 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.499887 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.499904 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.499927 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.499946 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.603140 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.603185 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.603196 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.603217 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.603232 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.706407 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.706572 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.706585 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.706699 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.706753 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.810310 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.810362 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.810372 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.810392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.810406 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.819382 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 05:48:29.239223733 +0000 UTC Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.849674 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.849683 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:24 crc kubenswrapper[4848]: E0128 12:47:24.849898 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:24 crc kubenswrapper[4848]: E0128 12:47:24.850014 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.866040 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.881206 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.897130 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.913759 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.914117 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.914383 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.914419 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.914441 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.914454 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:24Z","lastTransitionTime":"2026-01-28T12:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.933641 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.952928 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.969957 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.983944 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:24 crc kubenswrapper[4848]: I0128 12:47:24.999191 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:24Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.018009 4848 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.018058 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.018073 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.018095 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.018111 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.019700 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a79
3a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.033991 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.048265 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.067893 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.082897 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.097292 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.112900 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.121206 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.121272 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.121285 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.121311 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 
12:47:25.121327 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.126782 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.224642 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.224700 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.224712 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.224735 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.224746 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.328008 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.328073 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.328091 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.328109 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.328121 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.431451 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.431519 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.431531 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.431548 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.431558 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.535315 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.535371 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.535383 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.535406 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.535420 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.638376 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.638429 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.638438 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.638458 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.638468 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.741704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.741750 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.741762 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.741782 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.741795 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.820347 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 12:26:03.224667213 +0000 UTC Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.844599 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.844650 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.844661 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.844677 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.844687 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.849173 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.849207 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:25 crc kubenswrapper[4848]: E0128 12:47:25.849353 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:25 crc kubenswrapper[4848]: E0128 12:47:25.849488 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.947998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.948060 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.948074 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.948095 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:25 crc kubenswrapper[4848]: I0128 12:47:25.948110 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:25Z","lastTransitionTime":"2026-01-28T12:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.050320 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.050361 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.050372 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.050391 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.050405 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.153514 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.153578 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.153596 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.153623 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.153637 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.256775 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.256830 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.256841 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.256868 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.256889 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.359852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.359908 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.359924 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.359945 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.359956 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.462379 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.462456 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.462472 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.462493 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.462507 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.565744 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.565805 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.565822 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.565847 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.565862 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.668847 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.668958 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.668969 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.668989 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.669001 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.772586 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.772656 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.772670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.772692 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.772705 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.821161 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 01:38:53.795546309 +0000 UTC
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.849431 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.849428 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:47:26 crc kubenswrapper[4848]: E0128 12:47:26.849588 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:47:26 crc kubenswrapper[4848]: E0128 12:47:26.849726 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.864626 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:47:26 crc kubenswrapper[4848]: E0128 12:47:26.864902 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:47:26 crc kubenswrapper[4848]: E0128 12:47:26.865033 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:47:58.864999774 +0000 UTC m=+105.777216962 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.875744 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.875821 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.875854 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.875874 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.875887 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.978629 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.978685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.978701 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.978721 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:26 crc kubenswrapper[4848]: I0128 12:47:26.978733 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:26Z","lastTransitionTime":"2026-01-28T12:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.081813 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.081870 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.081881 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.081898 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.081913 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.184909 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.184975 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.184987 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.185011 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.185025 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.288425 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.288471 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.288482 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.288501 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.288543 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.390812 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.390867 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.390891 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.390911 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.390923 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.493827 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.493876 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.493889 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.493909 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.493922 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.597008 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.597232 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.597348 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.597494 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.597578 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.700529 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.700573 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.700587 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.700603 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.700616 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.713298 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.713567 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.713720 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.713857 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.713961 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
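The condition={...} payload printed by setters.go:603 is a serialized NodeCondition; the real type is v1.NodeCondition from k8s.io/api/core/v1. A dependency-free sketch with a local mirror of those fields reproduces the JSON shape seen in the log:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the condition fields printed by setters.go:603; the real
// type lives in k8s.io/api/core/v1, reproduced here so the example stays
// dependency-free.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  "2026-01-28T12:47:26Z",
		LastTransitionTime: "2026-01-28T12:47:26Z",
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: ...",
	}
	out, _ := json.Marshal(c)
	fmt.Println(string(out)) // matches the condition={...} payload in the log
}

Note that lastHeartbeatTime advances with every cycle while lastTransitionTime would normally stay pinned to the first False transition; here both move because each cycle rebuilds the condition.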
Has your network provider started?"} Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.726990 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:27Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.731918 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.731970 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
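The patch above is structurally fine; the API server rejects it because the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z while the node clock reads 2026-01-28. A short Go sketch for inspecting that certificate's validity window directly; InsecureSkipVerify is required precisely because normal verification fails:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

// addr is taken from the failing webhook URL in the log; InsecureSkipVerify
// lets us read the certificate even though verification would fail.
const addr = "127.0.0.1:9743"

func main() {
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("NotBefore=%s NotAfter=%s expired=%v\n",
		cert.NotBefore.Format(time.RFC3339),
		cert.NotAfter.Format(time.RFC3339),
		now.After(cert.NotAfter)) // the log shows now is after NotAfter
}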
event="NodeHasNoDiskPressure" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.731978 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.731995 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.732005 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.745550 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:27Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.749621 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.749918 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.749996 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.750065 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.750140 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.762732 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:27Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.766581 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.766737 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.766800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.766875 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.766942 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.779456 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:27Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.783814 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.783887 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.783899 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.783922 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.783935 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.798580 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:27Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.798744 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.803267 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.803295 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.803304 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.803320 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.803330 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.821687 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 23:01:42.999741027 +0000 UTC Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.849408 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.849472 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.849584 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.849681 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.850661 4848 scope.go:117] "RemoveContainer" containerID="1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0" Jan 28 12:47:27 crc kubenswrapper[4848]: E0128 12:47:27.850948 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.906205 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.906295 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.906309 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.906328 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:27 crc kubenswrapper[4848]: I0128 12:47:27.906345 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:27Z","lastTransitionTime":"2026-01-28T12:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.009219 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.009290 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.009302 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.009324 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.009336 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.111434 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.111712 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.111845 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.111951 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.112039 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.214325 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.214362 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.214371 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.214389 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.214403 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.317690 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.317728 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.317738 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.317752 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.317763 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.421263 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.421319 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.421335 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.421357 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.421372 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.524606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.524657 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.524672 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.524693 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.524707 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.627511 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.627559 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.627570 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.627586 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.627597 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.730709 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.730751 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.730760 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.730783 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.730799 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.822679 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 17:16:40.119735996 +0000 UTC Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.834060 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.834107 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.834119 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.834140 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.834156 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.849952 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.849984 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:28 crc kubenswrapper[4848]: E0128 12:47:28.850121 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:28 crc kubenswrapper[4848]: E0128 12:47:28.850207 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.937615 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.937672 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.937685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.937705 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:28 crc kubenswrapper[4848]: I0128 12:47:28.937718 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:28Z","lastTransitionTime":"2026-01-28T12:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.040904 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.040941 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.040950 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.040965 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.040978 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.143346 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.143395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.143404 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.143425 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.143435 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.246430 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.246479 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.246489 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.246509 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.246522 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.353748 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.353790 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.353800 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.353823 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.353832 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.457181 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.457237 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.457269 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.457290 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.457306 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.560833 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.561097 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.561108 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.561127 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.561140 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.663940 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.663976 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.663985 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.664017 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.664028 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.766620 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.766670 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.766682 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.766701 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.766716 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.822995 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 14:50:39.513602662 +0000 UTC Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.849638 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.849675 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:29 crc kubenswrapper[4848]: E0128 12:47:29.849787 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:29 crc kubenswrapper[4848]: E0128 12:47:29.849930 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.869601 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.869646 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.869657 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.869680 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.869692 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.972126 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.972187 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.972200 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.972222 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:29 crc kubenswrapper[4848]: I0128 12:47:29.972238 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:29Z","lastTransitionTime":"2026-01-28T12:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.074917 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.074959 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.074970 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.074989 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.075003 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.177656 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.177694 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.177704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.177721 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.177733 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.280707 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.280751 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.280765 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.280788 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.280805 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.383042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.383405 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.383495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.383572 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.383640 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.486159 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.486213 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.486225 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.486262 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.486275 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.588760 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.588812 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.588825 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.588843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.588860 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.691495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.691554 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.691572 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.691596 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.691610 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.794646 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.794685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.794696 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.794711 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.794720 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.823404 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 12:58:24.71418576 +0000 UTC Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.849125 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.849182 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:30 crc kubenswrapper[4848]: E0128 12:47:30.849388 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:30 crc kubenswrapper[4848]: E0128 12:47:30.849509 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.897048 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.897100 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.897112 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.897133 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:30 crc kubenswrapper[4848]: I0128 12:47:30.897148 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:30Z","lastTransitionTime":"2026-01-28T12:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.000032 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.000084 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.000097 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.000117 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.000129 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.103192 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.103275 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.103288 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.103308 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.103322 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.206693 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.206742 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.206752 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.206770 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.206780 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.309414 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.309456 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.309466 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.309485 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.309499 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.411941 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.411979 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.411990 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.412006 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.412019 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.514004 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.514071 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.514082 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.514103 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.514115 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.616789 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.617042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.617056 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.617118 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.617131 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.720150 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.720189 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.720201 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.720215 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.720225 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.823310 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.823349 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.823359 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.823380 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.823392 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.823508 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 15:44:36.475804167 +0000 UTC Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.849547 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.849593 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:31 crc kubenswrapper[4848]: E0128 12:47:31.849718 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:31 crc kubenswrapper[4848]: E0128 12:47:31.849832 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.926192 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.926231 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.926267 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.926288 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:31 crc kubenswrapper[4848]: I0128 12:47:31.926301 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:31Z","lastTransitionTime":"2026-01-28T12:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.028898 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.028947 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.028958 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.028977 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.028989 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.132080 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.132128 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.132139 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.132155 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.132167 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.235524 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.235590 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.235606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.235629 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.235643 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.337919 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.337981 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.337995 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.338015 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.338028 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.440797 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.440862 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.440874 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.440892 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.440904 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.543446 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.543507 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.543520 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.543546 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.543562 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.649119 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.649183 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.649195 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.649215 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.649227 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.752431 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.752501 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.752515 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.752550 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.752562 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.824409 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 01:03:17.549264229 +0000 UTC Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.849410 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:32 crc kubenswrapper[4848]: E0128 12:47:32.849690 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.849849 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:32 crc kubenswrapper[4848]: E0128 12:47:32.850107 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.860423 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.860455 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.860464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.860476 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.860486 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.963854 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.963923 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.963935 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.963956 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:32 crc kubenswrapper[4848]: I0128 12:47:32.963969 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:32Z","lastTransitionTime":"2026-01-28T12:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.067297 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.067350 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.067362 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.067382 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.067392 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.170432 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.170478 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.170491 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.170510 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.170522 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.273504 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.273567 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.273581 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.273637 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.273654 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.376536 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.376577 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.376591 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.376609 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.376622 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.479101 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.479156 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.479166 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.479183 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.479196 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.582668 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.582722 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.582736 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.582754 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.582769 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.686713 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.686783 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.686807 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.686840 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.686865 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.790395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.790472 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.790498 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.790528 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.790549 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.825150 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 14:26:16.32650131 +0000 UTC Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.849294 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.849365 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:33 crc kubenswrapper[4848]: E0128 12:47:33.849483 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:33 crc kubenswrapper[4848]: E0128 12:47:33.849531 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.894008 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.894080 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.894102 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.894134 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.894154 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.997306 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.997357 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.997370 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.997389 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:33 crc kubenswrapper[4848]: I0128 12:47:33.997400 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:33Z","lastTransitionTime":"2026-01-28T12:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.100765 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.100815 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.100828 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.100843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.100854 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.203899 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.203960 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.203978 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.204002 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.204020 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.306795 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.306850 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.306864 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.306885 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.306906 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.408929 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.408978 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.408990 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.409008 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.409019 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.512658 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.512747 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.512771 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.512805 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.512830 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.615600 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.615653 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.615662 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.615677 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.615688 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.718748 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.718805 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.718819 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.718863 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.718876 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.823075 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.823177 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.823206 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.823304 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.823329 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.826061 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 01:27:05.10817199 +0000 UTC Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.849892 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:34 crc kubenswrapper[4848]: E0128 12:47:34.850056 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.850124 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:34 crc kubenswrapper[4848]: E0128 12:47:34.850195 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.866925 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.880910 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.898908 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.916197 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.926696 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.926776 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.926793 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.926813 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.926846 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:34Z","lastTransitionTime":"2026-01-28T12:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.932356 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.945605 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.958073 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.973050 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:34 crc kubenswrapper[4848]: I0128 12:47:34.986361 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:34Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.006231 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a79
3a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.017073 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.030055 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.030099 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.030110 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.030129 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.030139 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.030159 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.041840 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.054805 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.067447 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.079991 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.091444 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.133202 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.133277 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.133287 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.133304 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 
12:47:35.133318 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.235605 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.235666 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.235679 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.235705 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.235722 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.339382 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.339432 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.339443 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.339461 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.339475 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.410810 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/0.log" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.410986 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerDied","Data":"c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.411275 4848 generic.go:334] "Generic (PLEG): container finished" podID="52f51c55-df27-4e41-b7c5-e3d714909803" containerID="c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade" exitCode=1 Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.411610 4848 scope.go:117] "RemoveContainer" containerID="c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.426157 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.440885 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.445769 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.445820 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.445832 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.445852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.445872 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.464067 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.478007 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.493354 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:34Z\\\",\\\"message\\\":\\\"2026-01-28T12:46:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6\\\\n2026-01-28T12:46:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6 to /host/opt/cni/bin/\\\\n2026-01-28T12:46:49Z [verbose] multus-daemon started\\\\n2026-01-28T12:46:49Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:47:34Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.512079 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e
2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.527638 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.543177 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.550085 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.550166 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.550180 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.550214 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.550227 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.558776 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.576293 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.593581 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.609139 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.626549 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.643610 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.652541 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.652723 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.652968 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.653166 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.653383 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.662015 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.677683 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.699058 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:35Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.756632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.756683 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.756693 4848 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.756711 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.756721 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.826455 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 12:14:32.506768888 +0000 UTC Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.849184 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.849220 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:35 crc kubenswrapper[4848]: E0128 12:47:35.849372 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:35 crc kubenswrapper[4848]: E0128 12:47:35.849454 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.859297 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.859346 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.859357 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.859377 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.859387 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.962467 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.962977 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.963061 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.963135 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:35 crc kubenswrapper[4848]: I0128 12:47:35.963228 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:35Z","lastTransitionTime":"2026-01-28T12:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.066403 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.066471 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.066486 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.066510 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.066524 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.170946 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.171355 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.171423 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.171508 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.171573 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.274310 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.274681 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.274790 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.274878 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.274991 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.378874 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.378932 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.378945 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.379029 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.379059 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.417959 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/0.log" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.418031 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerStarted","Data":"6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.436293 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.436293 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.455408 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:34Z\\\",\\\"message\\\":\\\"2026-01-28T12:46:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6\\\\n2026-01-28T12:46:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6 to /host/opt/cni/bin/\\\\n2026-01-28T12:46:49Z [verbose] multus-daemon started\\\\n2026-01-28T12:46:49Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:47:34Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.481396 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.481459 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc 
kubenswrapper[4848]: I0128 12:47:36.481481 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.481502 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.481514 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.490698 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 
Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.505763 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.521758 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.552874 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.567567 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.578984 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.584083 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.584124 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.584139 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.584161 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.584174 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.584174 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.592657 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z"
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"t
erminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.637876 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.651607 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.667009 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.679702 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.687178 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.687235 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.687273 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.687305 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.687321 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.708562 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.790846 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.790900 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.790911 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.790934 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.790945 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.827334 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 03:10:48.138084822 +0000 UTC Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.849393 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.849408 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:36 crc kubenswrapper[4848]: E0128 12:47:36.849547 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:36 crc kubenswrapper[4848]: E0128 12:47:36.849842 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.895360 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.895445 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.895459 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.895480 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:36 crc kubenswrapper[4848]: I0128 12:47:36.895493 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:36Z","lastTransitionTime":"2026-01-28T12:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.000045 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.000106 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.000121 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.000144 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.000157 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.102832 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.102877 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.102887 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.102908 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.102919 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.207215 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.207282 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.207293 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.207312 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.207323 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.310194 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.310563 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.310754 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.310845 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.310979 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.413927 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.413973 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.413984 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.414003 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.414014 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.516904 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.517225 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.517385 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.517472 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.517535 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.620316 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.620368 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.620387 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.620405 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.620418 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.722916 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.722967 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.722980 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.722998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.723010 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.826408 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.826469 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.826484 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.826505 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.826520 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.828483 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 09:07:05.417508226 +0000 UTC Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.849411 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:37 crc kubenswrapper[4848]: E0128 12:47:37.849646 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.849433 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:37 crc kubenswrapper[4848]: E0128 12:47:37.850039 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.930289 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.930336 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.930348 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.930366 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:37 crc kubenswrapper[4848]: I0128 12:47:37.930377 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:37Z","lastTransitionTime":"2026-01-28T12:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.033424 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.033484 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.033497 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.033519 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.033534 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.087181 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.088304 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.088404 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.088515 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.088605 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.105910 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.110227 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.110369 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.110456 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.110564 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.110630 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.126080 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.131618 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.131673 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
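The failure repeated above is unambiguous about its cause: the serving certificate behind https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node's clock reads 2026-01-28T12:47:38Z. A minimal sketch (Go; not the kubelet's own code, and it assumes the webhook endpoint from the log is reachable from where it runs) for inspecting that certificate's validity window directly:

```go
// Dial the webhook endpoint named in the log and print its serving
// certificate's validity window. InsecureSkipVerify is deliberate so the
// handshake completes even though the certificate is expired.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	// The x509 error in the log means now falls outside [NotBefore, NotAfter].
	fmt.Printf("valid now: %v\n", !now.Before(cert.NotBefore) && now.Before(cert.NotAfter))
}
```

With normal verification the handshake would abort on the expired certificate before anything could be printed, which is exactly the condition being inspected here.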
event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.131685 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.131725 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.131736 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.146325 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.151021 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.151060 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
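To see which admission configuration is injecting the failing webhook, one could query the cluster rather than guess object names. A hedged client-go sketch (assumes a usable kubeconfig in $KUBECONFIG; the only hard-coded string is the webhook name, copied from the log):

```go
// List ValidatingWebhookConfigurations and report which one carries the
// webhook the kubelet is failing against, plus where its clientConfig points.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	restCfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatalf("kubeconfig: %v", err)
	}
	cs, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		log.Fatalf("client: %v", err)
	}
	vwcs, err := cs.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatalf("list: %v", err)
	}
	for _, vwc := range vwcs.Items {
		for _, wh := range vwc.Webhooks {
			if wh.Name != "node.network-node-identity.openshift.io" {
				continue
			}
			fmt.Printf("configuration: %s\n", vwc.Name)
			if wh.ClientConfig.URL != nil {
				fmt.Printf("  url: %s\n", *wh.ClientConfig.URL) // https://127.0.0.1:9743/node in this log
			}
			if wh.ClientConfig.Service != nil {
				fmt.Printf("  service: %s/%s\n", wh.ClientConfig.Service.Namespace, wh.ClientConfig.Service.Name)
			}
		}
	}
}
```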
event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.151072 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.151090 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.151100 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.165644 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.169771 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.169806 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
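The attempt that follows just below is the last one: the kubelet bounds consecutive status-patch attempts per sync, then logs "update node status exceeds retry count" and waits for the next sync period. A schematic of that pattern, not the kubelet's own code (the bound of 5 mirrors the kubelet's nodeStatusUpdateRetry constant and matches the five failed attempts in this log; the patch function is a stand-in):

```go
// Retry pattern behind "Error updating node status, will retry" followed by
// "update node status exceeds retry count".
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // per-sync bound on consecutive patch attempts

func updateNodeStatus(patch func() error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patch(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	// Stand-in for the real patch call; here it always fails the way the
	// webhook does in this log.
	webhookErr := errors.New(`failed calling webhook "node.network-node-identity.openshift.io": x509: certificate has expired or is not yet valid`)
	if err := updateNodeStatus(func() error { return webhookErr }); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}
```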
event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.169815 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.169843 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.169854 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.184203 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c02a3865-cae2-4ed5-ac7f-9b2b69ab66fb\\\",\\\"systemUUID\\\":\\\"e0f42f58-1276-4f22-b2e0-2ee1470a6c7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.184355 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.186725 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
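Independent of the webhook failure, the Ready=False condition recurring above has its own cause: the kubelet finds no CNI configuration under /etc/kubernetes/cni/net.d/ (path taken verbatim from the log; plain upstream kubelets usually look in /etc/cni/net.d). A small sketch to check what, if anything, that directory contains:

```go
// Inspect the CNI config directory named in the NetworkPluginNotReady message.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v (consistent with NetworkPluginNotReady)\n", dir, err)
		return
	}
	if len(entries) == 0 {
		fmt.Printf("%s is empty; kubelet will keep reporting NetworkReady=false\n", dir)
		return
	}
	for _, e := range entries {
		fmt.Println(filepath.Join(dir, e.Name())) // a *.conflist here would satisfy the CNI check
	}
}
```

Until a network plugin writes a config file there, and the status patch stops being rejected by the expired webhook certificate, the node keeps cycling through the same NodeNotReady events, as the entries below show.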
event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.186780 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.186792 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.186813 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.186840 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.290495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.290616 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.290679 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.290719 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.290786 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.393316 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.393348 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.393376 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.393392 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.393401 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.496912 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.496976 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.496999 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.497030 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.497051 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.599461 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.599497 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.599505 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.599531 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.599541 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.702618 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.702658 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.702666 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.702686 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.702696 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.805917 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.806317 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.806475 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.806622 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.806763 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.829474 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 15:13:14.534101857 +0000 UTC Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.849803 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.849803 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.850391 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:38 crc kubenswrapper[4848]: E0128 12:47:38.850483 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.910009 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.910056 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.910068 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.910089 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:38 crc kubenswrapper[4848]: I0128 12:47:38.910102 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:38Z","lastTransitionTime":"2026-01-28T12:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.012536 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.012569 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.012600 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.012616 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.012626 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.115988 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.116033 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.116049 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.116067 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.116078 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.221701 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.221763 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.221772 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.221794 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.221806 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.324510 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.324582 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.324595 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.324641 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.324654 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.427734 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.427812 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.427829 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.427852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.427867 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.531533 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.531581 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.531593 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.531611 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.531624 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.634442 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.634765 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.634870 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.634952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.635025 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.737751 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.737812 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.737828 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.737858 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.737880 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.830681 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 08:15:04.230961823 +0000 UTC Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.846625 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.846718 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.846928 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.847036 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.847096 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.849489 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.849500 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:39 crc kubenswrapper[4848]: E0128 12:47:39.849625 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:39 crc kubenswrapper[4848]: E0128 12:47:39.849751 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.950455 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.950495 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.950507 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.950524 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:39 crc kubenswrapper[4848]: I0128 12:47:39.950534 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:39Z","lastTransitionTime":"2026-01-28T12:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.053413 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.053474 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.053493 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.053519 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.053530 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.157287 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.157361 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.157384 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.157415 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.157437 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.260811 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.260853 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.260865 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.260885 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.260898 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.364155 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.364197 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.364207 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.364225 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.364237 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.466517 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.466564 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.466583 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.466604 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.466618 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.569596 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.569634 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.569641 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.569656 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.569665 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.673156 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.673238 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.673264 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.673280 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.673291 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.776126 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.776169 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.776180 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.776199 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.776211 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.831201 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 05:46:27.896758838 +0000 UTC Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.849947 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:40 crc kubenswrapper[4848]: E0128 12:47:40.850133 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.850455 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:40 crc kubenswrapper[4848]: E0128 12:47:40.850779 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.880080 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.880389 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.880589 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.880751 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.880947 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.984693 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.985101 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.985368 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.985612 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:40 crc kubenswrapper[4848]: I0128 12:47:40.985808 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:40Z","lastTransitionTime":"2026-01-28T12:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.090516 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.090570 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.090585 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.090609 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.090624 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.193720 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.194153 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.194328 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.194461 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.194578 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.298852 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.298915 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.298930 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.298951 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.298962 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.401640 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.402102 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.402311 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.402484 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.402645 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.505230 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.505360 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.505379 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.505403 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.505418 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.608190 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.608306 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.608334 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.608363 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.608397 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.711228 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.711315 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.711326 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.711350 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.711364 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.813711 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.813749 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.813757 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.813772 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.813783 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.832334 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 15:01:10.269989854 +0000 UTC Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.849473 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.850101 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:41 crc kubenswrapper[4848]: E0128 12:47:41.850571 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:41 crc kubenswrapper[4848]: E0128 12:47:41.850313 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.850679 4848 scope.go:117] "RemoveContainer" containerID="1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.917959 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.918004 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.918016 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.918035 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:41 crc kubenswrapper[4848]: I0128 12:47:41.918048 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:41Z","lastTransitionTime":"2026-01-28T12:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.027235 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.027777 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.027807 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.027837 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.027856 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.130844 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.130885 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.130898 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.130920 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.130934 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.234122 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.234161 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.234178 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.234200 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.234214 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.338469 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.338526 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.338541 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.338559 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.338571 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.440382 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.440700 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.440715 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.440735 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.440745 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.441726 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/2.log" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.446834 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.447501 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.464986 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.477689 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.491612 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.504140 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.520851 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.536265 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.544065 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.544111 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.544123 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.544144 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.544159 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.552095 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.574474 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 
ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.590726 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.604324 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.618485 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.634224 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.646979 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.647028 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.647038 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.647056 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.647067 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.648790 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:34Z\\\",\\\"message\\\":\\\"2026-01-28T12:46:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6\\\\n2026-01-28T12:46:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6 to /host/opt/cni/bin/\\\\n2026-01-28T12:46:49Z [verbose] multus-daemon started\\\\n2026-01-28T12:46:49Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:47:34Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.664771 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.674831 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.688194 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.702199 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:42Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.750393 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.750478 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.750506 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.750535 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.750555 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.833329 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 10:29:01.056227921 +0000 UTC
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.849968 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.850070 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:47:42 crc kubenswrapper[4848]: E0128 12:47:42.850134 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d"
Jan 28 12:47:42 crc kubenswrapper[4848]: E0128 12:47:42.850293 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.853188 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.853239 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.853275 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.853300 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.853315 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.956159 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.956218 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.956229 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.956280 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:42 crc kubenswrapper[4848]: I0128 12:47:42.956296 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:42Z","lastTransitionTime":"2026-01-28T12:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.059386 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.059437 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.059449 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.059467 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.059479 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.161763 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.161809 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.161823 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.161845 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.161862 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.265596 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.265663 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.265681 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.265704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.265723 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.368642 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.368704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.368715 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.368733 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.368746 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.452758 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/3.log"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.453484 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/2.log"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.457420 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" exitCode=1
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.457476 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"}
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.457525 4848 scope.go:117] "RemoveContainer" containerID="1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.458725 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"
Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.459064 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.470632 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.470704 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.470719 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.470739 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.470751 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.471778 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.486454 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.509221 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1707d8d7f1f5408d3b744eff1a2cd9a0a6833a793a09a0f6616dc112fafb70f0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:16Z\\\",\\\"message\\\":\\\"Pod openshift-image-registry/node-ca-7pzvm after 0 failed attempt(s)\\\\nI0128 12:47:15.842523 6524 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-7pzvm\\\\nI0128 12:47:15.842378 6524 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0128 12:47:15.842362 6524 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-g9vht\\\\nF0128 12:47:15.842539 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:15Z is after 2025-08-24T17:21:41Z]\\\\nI0128 12:47:15.842546 6524 ovn.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:42Z\\\",\\\"message\\\":\\\"olumn _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: 
Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:42.809589 6922 services_controller.go:445] Built service openshift-marketplace/redhat-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nI0128 12:47:42.809584 6922 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:42.809605 6922 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, T\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.524633 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 
12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.539656 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:34Z\\\",\\\"message\\\":\\\"2026-01-28T12:46:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6\\\\n2026-01-28T12:46:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6 to /host/opt/cni/bin/\\\\n2026-01-28T12:46:49Z [verbose] multus-daemon started\\\\n2026-01-28T12:46:49Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:47:34Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.554410 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.566118 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.566333 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.566445 4848 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.566437 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:47.566399568 +0000 UTC m=+154.478616626 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.566690 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.566738 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:48:47.566706857 +0000 UTC m=+154.478923915 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.566779 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.566916 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.566926 4848 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.567017 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:48:47.566989764 +0000 UTC m=+154.479207002 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.566944 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.567054 4848 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.567133 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:48:47.567121458 +0000 UTC m=+154.479338666 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.570160 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.573193 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.573227 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.573238 4848 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.573274 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.573286 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.585585 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba2
11722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.599784 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.616169 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.628778 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.641845 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.655070 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.666319 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.667831 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.668002 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.668030 4848 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.668045 4848 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] 
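Every "Failed to update status for pod" record above shares one root cause: the kubelet's status patch is rejected because the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28T12:47:43Z; independently, the node is reported NotReady because no CNI configuration has been written to /etc/kubernetes/cni/net.d/ yet, and the volume mount/unmount failures are retried with the kubelet's exponential backoff (here capped at durationBeforeRetry 1m4s). A minimal diagnostic sketch in Go (an illustration, not part of the log; it assumes the webhook endpoint shown in the log is reachable from the node) that prints the serving certificate's validity window to confirm the expiry:

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        // Deliberately skip chain verification: a verifying handshake would
        // fail exactly as the kubelet's does; the goal is to inspect the
        // expired certificate, not to trust it.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatalf("dial webhook endpoint: %v", err)
        }
        defer conn.Close()

        // Print the validity window of each certificate the server presents.
        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%q notBefore=%s notAfter=%s expired=%v\n",
                cert.Subject.CommonName,
                cert.NotBefore.Format(time.RFC3339),
                cert.NotAfter.Format(time.RFC3339),
                time.Now().After(cert.NotAfter))
        }
    }

Against the window logged here (NotAfter 2025-08-24T17:21:41Z versus a clock of 2026-01-28T12:47:43Z) this sketch would report expired=true for the leaf certificate, matching the "x509: certificate has expired or is not yet valid" error carried by every failed patch in this section.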
Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.668112 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:48:47.6680915 +0000 UTC m=+154.580308538 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.677639 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.677697 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.677711 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.677735 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.677751 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.683527 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.698989 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.716154 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:43Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.780553 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.780594 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.780605 4848 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.780623 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.780676 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.834446 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 18:28:26.872971203 +0000 UTC Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.850071 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.850132 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.850290 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:43 crc kubenswrapper[4848]: E0128 12:47:43.850474 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.883624 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.883686 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.883710 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.883738 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.883757 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.986950 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.987029 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.987041 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.987059 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:43 crc kubenswrapper[4848]: I0128 12:47:43.987070 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:43Z","lastTransitionTime":"2026-01-28T12:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.090380 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.090432 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.090443 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.090466 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.090477 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.195026 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.195096 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.195122 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.195179 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.195214 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.298543 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.298606 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.298624 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.298652 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.298672 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.402231 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.402291 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.402305 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.402324 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.402337 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.465426 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/3.log" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.471843 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:47:44 crc kubenswrapper[4848]: E0128 12:47:44.472163 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.497425 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b9499934578
96e4412f5b06dd9e4ea69168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:42Z\\\",\\\"message\\\":\\\"olumn _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:42.809589 6922 services_controller.go:445] Built service openshift-marketplace/redhat-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nI0128 12:47:42.809584 6922 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:42.809605 6922 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, T\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z"
Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.507532 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.507952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.508217 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.508458 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.508620 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.519411 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.537615 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.549234 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.561388 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84e
ca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.571798 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.583077 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bmnpt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f51c55-df27-4e41-b7c5-e3d714909803\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:34Z\\\",\\\"message\\\":\\\"2026-01-28T12:46:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6\\\\n2026-01-28T12:46:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de0d1539-dc0c-4cec-832c-1c3123f46fc6 to /host/opt/cni/bin/\\\\n2026-01-28T12:46:49Z [verbose] multus-daemon started\\\\n2026-01-28T12:46:49Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:47:34Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fstpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bmnpt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.597172 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.606739 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.611051 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.611101 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.611114 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.611162 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.611175 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.617885 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.631846 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55fccf751865466eec868116cc8a710f207729625d2e45bd94318388070b45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.643060 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://febbec93464cf9aae32697d4ea9461b571982035510299688854107f6c06e1d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.654863 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.667106 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21791a5e793ed1ffeb1012a6829a9b79f10001cb751dce2d1509f6be1d2df201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68f9ff312685ca5882185ea284d2b0118fcaf1d30bad5bda043701bb30006b0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.677774 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wkg8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f36b4b4-c850-44cc-b422-89ce6fe024cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4503f8dc70a75a57cbba9728600113b7ce5aee5659f486fb6bfe89e96c57d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nr2rq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wkg8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.713922 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 
12:47:44.713952 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.713960 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.713974 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.713983 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.715885 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d447736-dd38-45b5-be15-2380dc55ad3d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m624n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wqtnc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.746769 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b93da01-044f-4540-8248-6d19f14ce06d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:46:33Z\\\",\\\"message\\\":\\\"W0128 12:46:22.317024 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 12:46:22.317449 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769604382 cert, and key in /tmp/serving-cert-1243744878/serving-signer.crt, /tmp/serving-cert-1243744878/serving-signer.key\\\\nI0128 12:46:22.833145 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:46:22.835221 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:46:22.835447 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:46:22.836365 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1243744878/tls.crt::/tmp/serving-cert-1243744878/tls.key\\\\\\\"\\\\nF0128 12:46:33.169117 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.816070 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.816502 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.816582 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.816619 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.816638 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.835678 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 13:16:21.14198671 +0000 UTC Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.849335 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.849567 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:44 crc kubenswrapper[4848]: E0128 12:47:44.849743 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:44 crc kubenswrapper[4848]: E0128 12:47:44.849974 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.864844 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.878620 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30570a21-e260-4494-89cd-2643cb0ca288\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40cb5a5cb3912238aa9abc2d77e4431e2ce4e195f896cbd4fe76a379b01fc782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffq99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vfhvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.899596 4848 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:47:42Z\\\",\\\"message\\\":\\\"olumn _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:42.809589 6922 services_controller.go:445] Built service openshift-marketplace/redhat-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nI0128 12:47:42.809584 6922 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:47:42.809605 6922 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, T\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:47:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting 
failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr5bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g9vht\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.910533 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8b8c4d9-ca19-4ac2-8c3e-bdc023238593\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a2a5da4fe4d92b1b8a47a379a0b499890816d3d71e7dbdce3c482112dc27f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\
\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edb61fa49ee92234b920cfe46764184313d66c01758d890655052b6e2a865ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8xp64\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-lsfjz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.919303 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.919353 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.919362 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.919378 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.919388 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:44Z","lastTransitionTime":"2026-01-28T12:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.924837 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-96648" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa3c7276-5cf5-47da-afc7-eb68e028f483\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf5c7a3c5f7a0173451686dd7d96774eb2aac8a53d8ab3ec8b64b7b657cdf30b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://af2b8178290d89720caa70a9854d39cdf70da89b8885a0d5daf735cd43494e1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c012f02f680c80b8b40453cd268ea7557c4183cf99aae51b878c0f2f56bad47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2505bd0aca3275583fca4cbed46cb701018f58a54103f6f3667532352ca9e4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://813e2e0781a55b09e6cb86b4d21b8f4af8ae1a31702e9e5be89da8a44995a244\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://663fdcb5b6626d88cd39f89a30fdb0500bad3a7a3d367b1b88ca300f3d73d667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2b13fe3fa1b5d9ccf281d304c03dfa4cb77ddb34fd627e3cd0c5c3588d930de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7z4fj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-96648\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.937160 4848 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-7pzvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5333926-d7c2-4039-84d7-b6b063baa185\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5b26d46678849832b4ee0f67822198fe77b641b4ebab99b425d70b3d4685405\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qx99h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pzvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.954300 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"20cf726d-9e48-4420-a872-1fba43f424d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:47:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd1d2f9d81d2a44043a45d46470d2f41d56592497af438d541680c889abd3e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748f42714adce7ee2180da29cc186e22a4775d0f7fed0f414aaf78105176b1f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46de8d596cd4e6f2871795d26554177b66beba211722dc9b38fc601b0ca0c056\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bc73f498c3d096b23f68b73ccb0f7317fb98f943ab15ce2ad077eb4e4b850e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.966572 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"554c659f-bf1b-4d77-9a23-e33b04378550\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d4958cd57b1a06d69b28d9ec469fd11255e2839e58d3c6ad54df75f10e9287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://423e82e5320a80f969e1f8dc32628f651abc7c89015961dc92bcbbb1313bb954\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0318b762151341bd1aea99ed84eca065e3e6381c2340e74293fd429783171f65\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:46:15Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:44 crc kubenswrapper[4848]: I0128 12:47:44.977868 4848 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:47:44Z is after 2025-08-24T17:21:41Z" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.022937 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-bmnpt" podStartSLOduration=65.022908307 podStartE2EDuration="1m5.022908307s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:45.008525555 +0000 UTC m=+91.920742593" watchObservedRunningTime="2026-01-28 12:47:45.022908307 +0000 UTC m=+91.935125365" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.023109 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.023138 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.023145 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.023162 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 
12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.023174 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.074924 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=66.074896539 podStartE2EDuration="1m6.074896539s" podCreationTimestamp="2026-01-28 12:46:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:45.060151077 +0000 UTC m=+91.972368135" watchObservedRunningTime="2026-01-28 12:47:45.074896539 +0000 UTC m=+91.987113577" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.103687 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-wkg8g" podStartSLOduration=65.103654814 podStartE2EDuration="1m5.103654814s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:45.102882321 +0000 UTC m=+92.015099370" watchObservedRunningTime="2026-01-28 12:47:45.103654814 +0000 UTC m=+92.015871872" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.126344 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.126399 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.126415 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.126438 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.126452 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.229567 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.229900 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.229999 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.230112 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.230293 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.333295 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.333333 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.333342 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.333357 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.333365 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.436393 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.436655 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.436729 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.436797 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.436864 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.540453 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.540510 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.540524 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.540547 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.540560 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.644241 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.644309 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.644320 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.644338 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.644371 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.747523 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.747553 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.747561 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.747575 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.747588 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.835872 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 20:12:25.517784094 +0000 UTC Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.849022 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.849070 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:45 crc kubenswrapper[4848]: E0128 12:47:45.849159 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:45 crc kubenswrapper[4848]: E0128 12:47:45.849278 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.850520 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.850566 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.850579 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.850597 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.850610 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.953369 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.953413 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.953424 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.953441 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:45 crc kubenswrapper[4848]: I0128 12:47:45.953454 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:45Z","lastTransitionTime":"2026-01-28T12:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.056817 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.057118 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.057300 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.057459 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.057582 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.161397 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.161456 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.161466 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.161487 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.161499 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.265033 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.265087 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.265098 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.265116 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.265128 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.367894 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.367938 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.367949 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.367969 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.367991 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.470612 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.470651 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.470678 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.470693 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.470704 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.573339 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.573388 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.573403 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.573422 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.573434 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.677269 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.677328 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.677341 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.677362 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.677376 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.780997 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.781053 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.781064 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.781080 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.781092 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.836628 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 21:03:06.972946857 +0000 UTC Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.849423 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.849440 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:46 crc kubenswrapper[4848]: E0128 12:47:46.849605 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:46 crc kubenswrapper[4848]: E0128 12:47:46.849669 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.883393 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.883448 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.883464 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.883485 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.883498 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.986395 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.986456 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.986469 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.986486 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:46 crc kubenswrapper[4848]: I0128 12:47:46.986499 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:46Z","lastTransitionTime":"2026-01-28T12:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.089610 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.089677 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.089695 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.089721 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.089740 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.193901 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.193990 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.194014 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.194075 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.194114 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.300998 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.301046 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.301058 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.301079 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.301092 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.404281 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.404329 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.404339 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.404356 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.404368 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.507565 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.507625 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.507644 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.507672 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.507691 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.610595 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.610658 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.610673 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.610699 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.610714 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.713666 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.713718 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.713727 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.713744 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.713758 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.815930 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.815986 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.815997 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.816013 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.816026 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.837670 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 10:45:53.398763545 +0000 UTC Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.849033 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.849045 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:47 crc kubenswrapper[4848]: E0128 12:47:47.849357 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:47 crc kubenswrapper[4848]: E0128 12:47:47.849494 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.918805 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.918858 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.918874 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.918891 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:47 crc kubenswrapper[4848]: I0128 12:47:47.918902 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:47Z","lastTransitionTime":"2026-01-28T12:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.021966 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.022029 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.022042 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.022064 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.022078 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:48Z","lastTransitionTime":"2026-01-28T12:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.124277 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.124358 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.124391 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.124410 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.124422 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:48Z","lastTransitionTime":"2026-01-28T12:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.228233 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.228323 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.228334 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.228357 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.228370 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:48Z","lastTransitionTime":"2026-01-28T12:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.331605 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.332082 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.332172 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.332326 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.332454 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:48Z","lastTransitionTime":"2026-01-28T12:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.435571 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.435640 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.435657 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.435684 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.435704 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:48Z","lastTransitionTime":"2026-01-28T12:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.504743 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.504787 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.504798 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.504818 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.504830 4848 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:47:48Z","lastTransitionTime":"2026-01-28T12:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.553609 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw"] Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.554284 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.557113 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.557179 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.558000 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.560704 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.622887 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30417bb5-0359-4002-9bdd-e4beae9f91dd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.623042 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/30417bb5-0359-4002-9bdd-e4beae9f91dd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.623083 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30417bb5-0359-4002-9bdd-e4beae9f91dd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.623113 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/30417bb5-0359-4002-9bdd-e4beae9f91dd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.623155 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/30417bb5-0359-4002-9bdd-e4beae9f91dd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.627935 4848 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podStartSLOduration=68.627906435 podStartE2EDuration="1m8.627906435s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:48.599714186 +0000 UTC m=+95.511931224" watchObservedRunningTime="2026-01-28 12:47:48.627906435 +0000 UTC m=+95.540123483" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.658157 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-lsfjz" podStartSLOduration=67.658127929 podStartE2EDuration="1m7.658127929s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:48.643301575 +0000 UTC m=+95.555518623" watchObservedRunningTime="2026-01-28 12:47:48.658127929 +0000 UTC m=+95.570344967" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.658815 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=45.658807819 podStartE2EDuration="45.658807819s" podCreationTimestamp="2026-01-28 12:47:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:48.658309504 +0000 UTC m=+95.570526542" watchObservedRunningTime="2026-01-28 12:47:48.658807819 +0000 UTC m=+95.571024857" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.701168 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=69.701140882 podStartE2EDuration="1m9.701140882s" podCreationTimestamp="2026-01-28 12:46:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:48.687909612 +0000 UTC m=+95.600126650" watchObservedRunningTime="2026-01-28 12:47:48.701140882 +0000 UTC m=+95.613357920" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.724462 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30417bb5-0359-4002-9bdd-e4beae9f91dd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.724871 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30417bb5-0359-4002-9bdd-e4beae9f91dd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.724964 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/30417bb5-0359-4002-9bdd-e4beae9f91dd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc 
kubenswrapper[4848]: I0128 12:47:48.725046 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/30417bb5-0359-4002-9bdd-e4beae9f91dd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.725129 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/30417bb5-0359-4002-9bdd-e4beae9f91dd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.725267 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/30417bb5-0359-4002-9bdd-e4beae9f91dd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.725062 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/30417bb5-0359-4002-9bdd-e4beae9f91dd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.726195 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/30417bb5-0359-4002-9bdd-e4beae9f91dd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.732850 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30417bb5-0359-4002-9bdd-e4beae9f91dd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.736503 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-96648" podStartSLOduration=68.736481729 podStartE2EDuration="1m8.736481729s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:48.721228923 +0000 UTC m=+95.633445961" watchObservedRunningTime="2026-01-28 12:47:48.736481729 +0000 UTC m=+95.648698767" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.736947 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-7pzvm" podStartSLOduration=68.736940962 podStartE2EDuration="1m8.736940962s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-28 12:47:48.736308124 +0000 UTC m=+95.648525182" watchObservedRunningTime="2026-01-28 12:47:48.736940962 +0000 UTC m=+95.649158010" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.752232 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30417bb5-0359-4002-9bdd-e4beae9f91dd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kfhjw\" (UID: \"30417bb5-0359-4002-9bdd-e4beae9f91dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.838820 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 09:53:09.230885302 +0000 UTC Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.838908 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.847118 4848 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.849391 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:48 crc kubenswrapper[4848]: E0128 12:47:48.849570 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.849399 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:48 crc kubenswrapper[4848]: E0128 12:47:48.849797 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:48 crc kubenswrapper[4848]: I0128 12:47:48.868900 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" Jan 28 12:47:49 crc kubenswrapper[4848]: I0128 12:47:49.487027 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" event={"ID":"30417bb5-0359-4002-9bdd-e4beae9f91dd","Type":"ContainerStarted","Data":"46a69a06c691c435271b191d72da5e53253bae96d4c6c7b4626958935ce11a1f"} Jan 28 12:47:49 crc kubenswrapper[4848]: I0128 12:47:49.487098 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" event={"ID":"30417bb5-0359-4002-9bdd-e4beae9f91dd","Type":"ContainerStarted","Data":"b935bf1bd4c48f8698b0fdd831ee14678e9585990626c93431086f37cfe2809e"} Jan 28 12:47:49 crc kubenswrapper[4848]: I0128 12:47:49.503739 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kfhjw" podStartSLOduration=69.503706522 podStartE2EDuration="1m9.503706522s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:49.503578659 +0000 UTC m=+96.415795707" watchObservedRunningTime="2026-01-28 12:47:49.503706522 +0000 UTC m=+96.415923590" Jan 28 12:47:49 crc kubenswrapper[4848]: I0128 12:47:49.850147 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:49 crc kubenswrapper[4848]: I0128 12:47:49.850163 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:49 crc kubenswrapper[4848]: E0128 12:47:49.850502 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:49 crc kubenswrapper[4848]: E0128 12:47:49.850753 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:49 crc kubenswrapper[4848]: I0128 12:47:49.865062 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 28 12:47:50 crc kubenswrapper[4848]: I0128 12:47:50.849628 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:50 crc kubenswrapper[4848]: I0128 12:47:50.849729 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:50 crc kubenswrapper[4848]: E0128 12:47:50.850067 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:50 crc kubenswrapper[4848]: E0128 12:47:50.850236 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:51 crc kubenswrapper[4848]: I0128 12:47:51.849621 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:51 crc kubenswrapper[4848]: E0128 12:47:51.849735 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:51 crc kubenswrapper[4848]: I0128 12:47:51.849917 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:51 crc kubenswrapper[4848]: E0128 12:47:51.849962 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:51 crc kubenswrapper[4848]: I0128 12:47:51.865006 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 28 12:47:52 crc kubenswrapper[4848]: I0128 12:47:52.849363 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:52 crc kubenswrapper[4848]: I0128 12:47:52.849387 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:52 crc kubenswrapper[4848]: E0128 12:47:52.849551 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:52 crc kubenswrapper[4848]: E0128 12:47:52.849647 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:53 crc kubenswrapper[4848]: I0128 12:47:53.849292 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:53 crc kubenswrapper[4848]: I0128 12:47:53.849450 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:53 crc kubenswrapper[4848]: E0128 12:47:53.849583 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:53 crc kubenswrapper[4848]: E0128 12:47:53.849712 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:54 crc kubenswrapper[4848]: I0128 12:47:54.851779 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:54 crc kubenswrapper[4848]: I0128 12:47:54.851824 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:54 crc kubenswrapper[4848]: E0128 12:47:54.852034 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:54 crc kubenswrapper[4848]: E0128 12:47:54.852138 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:54 crc kubenswrapper[4848]: I0128 12:47:54.880567 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=3.880543154 podStartE2EDuration="3.880543154s" podCreationTimestamp="2026-01-28 12:47:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:54.878108116 +0000 UTC m=+101.790325154" watchObservedRunningTime="2026-01-28 12:47:54.880543154 +0000 UTC m=+101.792760212" Jan 28 12:47:54 crc kubenswrapper[4848]: I0128 12:47:54.891792 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=5.891769848 podStartE2EDuration="5.891769848s" podCreationTimestamp="2026-01-28 12:47:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:47:54.890909703 +0000 UTC m=+101.803126741" watchObservedRunningTime="2026-01-28 12:47:54.891769848 +0000 UTC m=+101.803986876" Jan 28 12:47:55 crc kubenswrapper[4848]: I0128 12:47:55.848967 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:55 crc kubenswrapper[4848]: E0128 12:47:55.849694 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:55 crc kubenswrapper[4848]: I0128 12:47:55.849074 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:55 crc kubenswrapper[4848]: E0128 12:47:55.849956 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:56 crc kubenswrapper[4848]: I0128 12:47:56.849932 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:56 crc kubenswrapper[4848]: I0128 12:47:56.851007 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:56 crc kubenswrapper[4848]: E0128 12:47:56.851481 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:56 crc kubenswrapper[4848]: E0128 12:47:56.851586 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:56 crc kubenswrapper[4848]: I0128 12:47:56.851781 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:47:56 crc kubenswrapper[4848]: E0128 12:47:56.851959 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:47:57 crc kubenswrapper[4848]: I0128 12:47:57.849182 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:57 crc kubenswrapper[4848]: I0128 12:47:57.849329 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:57 crc kubenswrapper[4848]: E0128 12:47:57.849741 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:47:57 crc kubenswrapper[4848]: E0128 12:47:57.849945 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:58 crc kubenswrapper[4848]: I0128 12:47:58.849388 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:47:58 crc kubenswrapper[4848]: I0128 12:47:58.849434 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:58 crc kubenswrapper[4848]: E0128 12:47:58.850625 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:47:58 crc kubenswrapper[4848]: E0128 12:47:58.850634 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:47:58 crc kubenswrapper[4848]: I0128 12:47:58.943738 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:47:58 crc kubenswrapper[4848]: E0128 12:47:58.944128 4848 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:47:58 crc kubenswrapper[4848]: E0128 12:47:58.944184 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs podName:8d447736-dd38-45b5-be15-2380dc55ad3d nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.944168059 +0000 UTC m=+169.856385097 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs") pod "network-metrics-daemon-wqtnc" (UID: "8d447736-dd38-45b5-be15-2380dc55ad3d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:47:59 crc kubenswrapper[4848]: I0128 12:47:59.849793 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:47:59 crc kubenswrapper[4848]: I0128 12:47:59.849869 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:47:59 crc kubenswrapper[4848]: E0128 12:47:59.850016 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:47:59 crc kubenswrapper[4848]: E0128 12:47:59.850118 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:00 crc kubenswrapper[4848]: I0128 12:48:00.849778 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:00 crc kubenswrapper[4848]: I0128 12:48:00.849849 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:00 crc kubenswrapper[4848]: E0128 12:48:00.850158 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:00 crc kubenswrapper[4848]: E0128 12:48:00.850404 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:01 crc kubenswrapper[4848]: I0128 12:48:01.849306 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:01 crc kubenswrapper[4848]: I0128 12:48:01.849334 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:01 crc kubenswrapper[4848]: E0128 12:48:01.849497 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:01 crc kubenswrapper[4848]: E0128 12:48:01.849602 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:02 crc kubenswrapper[4848]: I0128 12:48:02.849540 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:02 crc kubenswrapper[4848]: I0128 12:48:02.849544 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:02 crc kubenswrapper[4848]: E0128 12:48:02.849760 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:02 crc kubenswrapper[4848]: E0128 12:48:02.849904 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:03 crc kubenswrapper[4848]: I0128 12:48:03.849678 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:03 crc kubenswrapper[4848]: I0128 12:48:03.849810 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:03 crc kubenswrapper[4848]: E0128 12:48:03.849872 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:03 crc kubenswrapper[4848]: E0128 12:48:03.849986 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:04 crc kubenswrapper[4848]: I0128 12:48:04.849895 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:04 crc kubenswrapper[4848]: I0128 12:48:04.850014 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:04 crc kubenswrapper[4848]: E0128 12:48:04.852125 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:04 crc kubenswrapper[4848]: E0128 12:48:04.852384 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:05 crc kubenswrapper[4848]: I0128 12:48:05.849697 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:05 crc kubenswrapper[4848]: I0128 12:48:05.849745 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:05 crc kubenswrapper[4848]: E0128 12:48:05.849858 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:05 crc kubenswrapper[4848]: E0128 12:48:05.849936 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:06 crc kubenswrapper[4848]: I0128 12:48:06.849339 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:06 crc kubenswrapper[4848]: I0128 12:48:06.849463 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:06 crc kubenswrapper[4848]: E0128 12:48:06.849489 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:06 crc kubenswrapper[4848]: E0128 12:48:06.849705 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:07 crc kubenswrapper[4848]: I0128 12:48:07.849147 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:07 crc kubenswrapper[4848]: I0128 12:48:07.849151 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:07 crc kubenswrapper[4848]: E0128 12:48:07.849354 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:07 crc kubenswrapper[4848]: E0128 12:48:07.849641 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:08 crc kubenswrapper[4848]: I0128 12:48:08.849711 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:08 crc kubenswrapper[4848]: I0128 12:48:08.849778 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:08 crc kubenswrapper[4848]: E0128 12:48:08.849940 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:08 crc kubenswrapper[4848]: E0128 12:48:08.850073 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:09 crc kubenswrapper[4848]: I0128 12:48:09.849599 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:09 crc kubenswrapper[4848]: I0128 12:48:09.849634 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:09 crc kubenswrapper[4848]: E0128 12:48:09.850048 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:09 crc kubenswrapper[4848]: E0128 12:48:09.850413 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:10 crc kubenswrapper[4848]: I0128 12:48:10.897516 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:10 crc kubenswrapper[4848]: I0128 12:48:10.897544 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:10 crc kubenswrapper[4848]: E0128 12:48:10.897708 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:10 crc kubenswrapper[4848]: E0128 12:48:10.897952 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:10 crc kubenswrapper[4848]: I0128 12:48:10.898785 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:48:10 crc kubenswrapper[4848]: E0128 12:48:10.899032 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:48:11 crc kubenswrapper[4848]: I0128 12:48:11.849784 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:11 crc kubenswrapper[4848]: I0128 12:48:11.849864 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:11 crc kubenswrapper[4848]: E0128 12:48:11.850398 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:11 crc kubenswrapper[4848]: E0128 12:48:11.850385 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:12 crc kubenswrapper[4848]: I0128 12:48:12.850030 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:12 crc kubenswrapper[4848]: I0128 12:48:12.850070 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:12 crc kubenswrapper[4848]: E0128 12:48:12.850385 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:12 crc kubenswrapper[4848]: E0128 12:48:12.850536 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:13 crc kubenswrapper[4848]: I0128 12:48:13.848998 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:13 crc kubenswrapper[4848]: I0128 12:48:13.849211 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:13 crc kubenswrapper[4848]: E0128 12:48:13.849402 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:13 crc kubenswrapper[4848]: E0128 12:48:13.849580 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:14 crc kubenswrapper[4848]: E0128 12:48:14.810949 4848 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 28 12:48:14 crc kubenswrapper[4848]: I0128 12:48:14.850418 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:14 crc kubenswrapper[4848]: I0128 12:48:14.850376 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:14 crc kubenswrapper[4848]: E0128 12:48:14.853423 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:14 crc kubenswrapper[4848]: E0128 12:48:14.853721 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:15 crc kubenswrapper[4848]: E0128 12:48:15.162863 4848 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 12:48:15 crc kubenswrapper[4848]: I0128 12:48:15.849099 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:15 crc kubenswrapper[4848]: I0128 12:48:15.849119 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:15 crc kubenswrapper[4848]: E0128 12:48:15.849513 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:15 crc kubenswrapper[4848]: E0128 12:48:15.849653 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:16 crc kubenswrapper[4848]: I0128 12:48:16.849690 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:16 crc kubenswrapper[4848]: E0128 12:48:16.849900 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:16 crc kubenswrapper[4848]: I0128 12:48:16.850426 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:16 crc kubenswrapper[4848]: E0128 12:48:16.850640 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:17 crc kubenswrapper[4848]: I0128 12:48:17.849661 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:17 crc kubenswrapper[4848]: I0128 12:48:17.849764 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:17 crc kubenswrapper[4848]: E0128 12:48:17.850212 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:17 crc kubenswrapper[4848]: E0128 12:48:17.850110 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:18 crc kubenswrapper[4848]: I0128 12:48:18.849400 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:18 crc kubenswrapper[4848]: I0128 12:48:18.849478 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:18 crc kubenswrapper[4848]: E0128 12:48:18.849551 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:18 crc kubenswrapper[4848]: E0128 12:48:18.849670 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:19 crc kubenswrapper[4848]: I0128 12:48:19.849604 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:19 crc kubenswrapper[4848]: I0128 12:48:19.849640 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:19 crc kubenswrapper[4848]: E0128 12:48:19.849802 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:19 crc kubenswrapper[4848]: E0128 12:48:19.849915 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:20 crc kubenswrapper[4848]: E0128 12:48:20.164738 4848 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 12:48:20 crc kubenswrapper[4848]: I0128 12:48:20.849636 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:20 crc kubenswrapper[4848]: I0128 12:48:20.849890 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:20 crc kubenswrapper[4848]: E0128 12:48:20.850053 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:20 crc kubenswrapper[4848]: E0128 12:48:20.850125 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.607521 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/1.log" Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.608171 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/0.log" Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.608225 4848 generic.go:334] "Generic (PLEG): container finished" podID="52f51c55-df27-4e41-b7c5-e3d714909803" containerID="6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6" exitCode=1 Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.608292 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerDied","Data":"6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6"} Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.608347 4848 scope.go:117] "RemoveContainer" containerID="c5bd8fc7a7e2bf0bf385758036db8c2ce7dd9935871219d596509dfa97b57ade" Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.608867 4848 scope.go:117] "RemoveContainer" containerID="6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6" Jan 28 12:48:21 crc kubenswrapper[4848]: E0128 12:48:21.609192 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-bmnpt_openshift-multus(52f51c55-df27-4e41-b7c5-e3d714909803)\"" pod="openshift-multus/multus-bmnpt" podUID="52f51c55-df27-4e41-b7c5-e3d714909803" Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.849473 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:21 crc kubenswrapper[4848]: I0128 12:48:21.849577 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:21 crc kubenswrapper[4848]: E0128 12:48:21.849634 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:21 crc kubenswrapper[4848]: E0128 12:48:21.849821 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:22 crc kubenswrapper[4848]: I0128 12:48:22.613727 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/1.log" Jan 28 12:48:22 crc kubenswrapper[4848]: I0128 12:48:22.849122 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:22 crc kubenswrapper[4848]: E0128 12:48:22.849551 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:22 crc kubenswrapper[4848]: I0128 12:48:22.849839 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:48:22 crc kubenswrapper[4848]: E0128 12:48:22.850002 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g9vht_openshift-ovn-kubernetes(a67a8b01-b8a6-4ca0-96fb-d5af26125a8d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" Jan 28 12:48:22 crc kubenswrapper[4848]: I0128 12:48:22.850117 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:22 crc kubenswrapper[4848]: E0128 12:48:22.850420 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:23 crc kubenswrapper[4848]: I0128 12:48:23.849036 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:23 crc kubenswrapper[4848]: E0128 12:48:23.849203 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:23 crc kubenswrapper[4848]: I0128 12:48:23.850154 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:23 crc kubenswrapper[4848]: E0128 12:48:23.850512 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:24 crc kubenswrapper[4848]: I0128 12:48:24.850453 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:24 crc kubenswrapper[4848]: I0128 12:48:24.851859 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:24 crc kubenswrapper[4848]: E0128 12:48:24.852007 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:24 crc kubenswrapper[4848]: E0128 12:48:24.852213 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:25 crc kubenswrapper[4848]: E0128 12:48:25.166316 4848 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 12:48:25 crc kubenswrapper[4848]: I0128 12:48:25.849751 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:25 crc kubenswrapper[4848]: I0128 12:48:25.849848 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:25 crc kubenswrapper[4848]: E0128 12:48:25.850416 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:25 crc kubenswrapper[4848]: E0128 12:48:25.850579 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:26 crc kubenswrapper[4848]: I0128 12:48:26.849025 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:26 crc kubenswrapper[4848]: I0128 12:48:26.849142 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:26 crc kubenswrapper[4848]: E0128 12:48:26.849673 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:26 crc kubenswrapper[4848]: E0128 12:48:26.849705 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:27 crc kubenswrapper[4848]: I0128 12:48:27.850076 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:27 crc kubenswrapper[4848]: I0128 12:48:27.850160 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:27 crc kubenswrapper[4848]: E0128 12:48:27.850390 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:27 crc kubenswrapper[4848]: E0128 12:48:27.850570 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:28 crc kubenswrapper[4848]: I0128 12:48:28.850040 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:28 crc kubenswrapper[4848]: I0128 12:48:28.850211 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:28 crc kubenswrapper[4848]: E0128 12:48:28.851237 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:28 crc kubenswrapper[4848]: E0128 12:48:28.851449 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:29 crc kubenswrapper[4848]: I0128 12:48:29.849862 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:29 crc kubenswrapper[4848]: I0128 12:48:29.849920 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:29 crc kubenswrapper[4848]: E0128 12:48:29.850123 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:29 crc kubenswrapper[4848]: E0128 12:48:29.850535 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:30 crc kubenswrapper[4848]: E0128 12:48:30.167466 4848 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 12:48:30 crc kubenswrapper[4848]: I0128 12:48:30.849129 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:30 crc kubenswrapper[4848]: I0128 12:48:30.849240 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:30 crc kubenswrapper[4848]: E0128 12:48:30.849388 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:30 crc kubenswrapper[4848]: E0128 12:48:30.849535 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:31 crc kubenswrapper[4848]: I0128 12:48:31.849458 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:31 crc kubenswrapper[4848]: E0128 12:48:31.849663 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:31 crc kubenswrapper[4848]: I0128 12:48:31.850050 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:31 crc kubenswrapper[4848]: E0128 12:48:31.850357 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:32 crc kubenswrapper[4848]: I0128 12:48:32.849651 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:32 crc kubenswrapper[4848]: E0128 12:48:32.849846 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:32 crc kubenswrapper[4848]: I0128 12:48:32.850092 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:32 crc kubenswrapper[4848]: E0128 12:48:32.850148 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:33 crc kubenswrapper[4848]: I0128 12:48:33.849477 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:33 crc kubenswrapper[4848]: I0128 12:48:33.849552 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:33 crc kubenswrapper[4848]: E0128 12:48:33.849681 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:33 crc kubenswrapper[4848]: E0128 12:48:33.849824 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:34 crc kubenswrapper[4848]: I0128 12:48:34.849799 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:34 crc kubenswrapper[4848]: I0128 12:48:34.849817 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:34 crc kubenswrapper[4848]: E0128 12:48:34.851929 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:34 crc kubenswrapper[4848]: E0128 12:48:34.852080 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:34 crc kubenswrapper[4848]: I0128 12:48:34.852537 4848 scope.go:117] "RemoveContainer" containerID="6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6" Jan 28 12:48:35 crc kubenswrapper[4848]: E0128 12:48:35.171136 4848 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 12:48:35 crc kubenswrapper[4848]: I0128 12:48:35.660195 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/1.log" Jan 28 12:48:35 crc kubenswrapper[4848]: I0128 12:48:35.660303 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerStarted","Data":"48213125717d28145348d56b365f5cf3ae7ce7690b5dc23aec948d05ef4b7fea"} Jan 28 12:48:35 crc kubenswrapper[4848]: I0128 12:48:35.849935 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:35 crc kubenswrapper[4848]: I0128 12:48:35.850002 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:35 crc kubenswrapper[4848]: E0128 12:48:35.850424 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:35 crc kubenswrapper[4848]: E0128 12:48:35.850612 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:35 crc kubenswrapper[4848]: I0128 12:48:35.850825 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.666610 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/3.log" Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.670278 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerStarted","Data":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.671563 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.708439 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podStartSLOduration=116.708419793 podStartE2EDuration="1m56.708419793s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:36.707660262 +0000 UTC m=+143.619877310" watchObservedRunningTime="2026-01-28 12:48:36.708419793 +0000 UTC m=+143.620636841" Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.837664 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-wqtnc"] Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.837848 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:36 crc kubenswrapper[4848]: E0128 12:48:36.837983 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:36 crc kubenswrapper[4848]: I0128 12:48:36.849732 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:36 crc kubenswrapper[4848]: E0128 12:48:36.849933 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:37 crc kubenswrapper[4848]: I0128 12:48:37.849442 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:37 crc kubenswrapper[4848]: I0128 12:48:37.849502 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:37 crc kubenswrapper[4848]: E0128 12:48:37.849690 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:37 crc kubenswrapper[4848]: E0128 12:48:37.850005 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:38 crc kubenswrapper[4848]: I0128 12:48:38.849090 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:38 crc kubenswrapper[4848]: E0128 12:48:38.849468 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:48:38 crc kubenswrapper[4848]: I0128 12:48:38.849724 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:38 crc kubenswrapper[4848]: E0128 12:48:38.849803 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wqtnc" podUID="8d447736-dd38-45b5-be15-2380dc55ad3d" Jan 28 12:48:39 crc kubenswrapper[4848]: I0128 12:48:39.849794 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:39 crc kubenswrapper[4848]: I0128 12:48:39.849834 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:39 crc kubenswrapper[4848]: E0128 12:48:39.851063 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:48:39 crc kubenswrapper[4848]: E0128 12:48:39.851216 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:48:40 crc kubenswrapper[4848]: I0128 12:48:40.850130 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc" Jan 28 12:48:40 crc kubenswrapper[4848]: I0128 12:48:40.851590 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:40 crc kubenswrapper[4848]: I0128 12:48:40.858444 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 28 12:48:40 crc kubenswrapper[4848]: I0128 12:48:40.859512 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 12:48:40 crc kubenswrapper[4848]: I0128 12:48:40.858745 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 12:48:40 crc kubenswrapper[4848]: I0128 12:48:40.859354 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 12:48:41 crc kubenswrapper[4848]: I0128 12:48:41.849752 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:41 crc kubenswrapper[4848]: I0128 12:48:41.850290 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:41 crc kubenswrapper[4848]: I0128 12:48:41.852183 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 12:48:41 crc kubenswrapper[4848]: I0128 12:48:41.852359 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.568911 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:47 crc kubenswrapper[4848]: E0128 12:48:47.569160 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:50:49.569114537 +0000 UTC m=+276.481331575 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.569330 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.569390 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.569520 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.576746 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.576875 4848 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.578059 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.670979 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.675881 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.780012 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.865084 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:48:47 crc kubenswrapper[4848]: I0128 12:48:47.872780 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:48 crc kubenswrapper[4848]: W0128 12:48:48.015782 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-981f5d65d1fc90f0a0b9643b9bca80dfce12bb2cdecd0d24d78d76c3b95d12a4 WatchSource:0}: Error finding container 981f5d65d1fc90f0a0b9643b9bca80dfce12bb2cdecd0d24d78d76c3b95d12a4: Status 404 returned error can't find the container with id 981f5d65d1fc90f0a0b9643b9bca80dfce12bb2cdecd0d24d78d76c3b95d12a4 Jan 28 12:48:48 crc kubenswrapper[4848]: W0128 12:48:48.083006 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-f72ff71d2da6287332eb6c2e91a3ad49ab84c8928702f9ad6214b18703d3df4b WatchSource:0}: Error finding container f72ff71d2da6287332eb6c2e91a3ad49ab84c8928702f9ad6214b18703d3df4b: Status 404 returned error can't find the container with id f72ff71d2da6287332eb6c2e91a3ad49ab84c8928702f9ad6214b18703d3df4b Jan 28 12:48:48 crc kubenswrapper[4848]: W0128 12:48:48.132152 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-13fe39b8607752bebc7f0dec9b91d73fc2b617ac053d3ab62b635bba5c8e47b0 WatchSource:0}: Error finding container 13fe39b8607752bebc7f0dec9b91d73fc2b617ac053d3ab62b635bba5c8e47b0: Status 404 returned error can't find the container with id 13fe39b8607752bebc7f0dec9b91d73fc2b617ac053d3ab62b635bba5c8e47b0 Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.715494 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"951c5a4fe86afb59274863c8f15a06754ff8516d9411039c32ee6790af9ae785"} Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.715974 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"13fe39b8607752bebc7f0dec9b91d73fc2b617ac053d3ab62b635bba5c8e47b0"} Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.716948 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.719654 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3f7883e04a9d2238a665923a7a2a3a210300c5c882550b72e956c90fe42d4792"} Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.719715 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f72ff71d2da6287332eb6c2e91a3ad49ab84c8928702f9ad6214b18703d3df4b"} Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.721940 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"22a2cadbcec691288fea3280168e25408e395df3fca9af669e7b2702fb795019"} Jan 28 12:48:48 crc kubenswrapper[4848]: I0128 12:48:48.721989 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"981f5d65d1fc90f0a0b9643b9bca80dfce12bb2cdecd0d24d78d76c3b95d12a4"} Jan 28 12:48:49 crc kubenswrapper[4848]: I0128 12:48:49.987919 4848 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.034338 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5dbnv"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.035130 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.036067 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-84b8w"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.036832 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-jltf4"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.036907 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.037210 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.042191 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.042808 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043062 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043226 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043289 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043320 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043320 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043423 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043588 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.043607 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.046129 4848 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.047298 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.047659 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.047681 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.047661 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.047962 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.048668 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.048794 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.048963 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.049941 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zwpd8"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.050403 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.055821 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.056505 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.057852 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.059560 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-zdq5h"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.061128 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.061865 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.071326 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.071396 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.071388 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.071624 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.071835 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.072951 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fdf5k"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073310 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073412 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073426 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073553 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073741 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073803 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073885 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073898 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.074061 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073744 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.074498 4848 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.074590 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.073756 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.075412 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.075491 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.075658 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.076460 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.078296 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.096919 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.096713 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.097214 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.098027 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.099281 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.100017 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.100196 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.100815 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.101229 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.101455 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.101740 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.103279 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.103506 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.104314 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.104335 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-audit\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.104815 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lvs6\" (UniqueName: \"kubernetes.io/projected/244b24d8-4734-4c9f-8ba3-ef7616581a58-kube-api-access-8lvs6\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.104903 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-oauth-serving-cert\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.104991 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ace4935-baa0-4bdd-8ab6-f57415730959-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.105083 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-config\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.105178 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-config\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.105440 4848 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-dns-operator/dns-operator-744455d44c-bcrg5"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.106175 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.106798 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.107191 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.108012 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csvmj\" (UniqueName: \"kubernetes.io/projected/4fa929eb-e746-4253-9cf6-dcb0939da532-kube-api-access-csvmj\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.108221 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-client-ca\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.108335 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-serving-cert\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.110837 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-trusted-ca-bundle\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111016 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca380f6-b02f-4128-86f6-ff19d22c532d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111108 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-serving-cert\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111177 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgxbg\" (UniqueName: 
\"kubernetes.io/projected/487b5d8d-0509-4510-8331-f36ac1bfe1a9-kube-api-access-hgxbg\") pod \"cluster-samples-operator-665b6dd947-vfjn4\" (UID: \"487b5d8d-0509-4510-8331-f36ac1bfe1a9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111286 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-serving-cert\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111596 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec9e32e6-39f8-4156-be3b-cda6046c017a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111693 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5q52\" (UniqueName: \"kubernetes.io/projected/c3bd5c0e-2656-4237-a9ab-e4de84101595-kube-api-access-f5q52\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111764 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nf4k\" (UniqueName: \"kubernetes.io/projected/0fe6e645-a059-4073-ad43-ecd9a8bdac14-kube-api-access-7nf4k\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111864 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/244b24d8-4734-4c9f-8ba3-ef7616581a58-node-pullsecrets\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111961 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrs45\" (UniqueName: \"kubernetes.io/projected/1afda1fa-7adc-4d82-bd34-5743e3b89b52-kube-api-access-hrs45\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.112039 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ace4935-baa0-4bdd-8ab6-f57415730959-config\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116236 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgqqk\" (UniqueName: 
\"kubernetes.io/projected/2ca380f6-b02f-4128-86f6-ff19d22c532d-kube-api-access-rgqqk\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116462 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ace4935-baa0-4bdd-8ab6-f57415730959-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116556 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec9e32e6-39f8-4156-be3b-cda6046c017a-config\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116646 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vct7d\" (UniqueName: \"kubernetes.io/projected/db02dfdf-9c10-4e70-80e6-29385127d7d7-kube-api-access-vct7d\") pod \"downloads-7954f5f757-jltf4\" (UID: \"db02dfdf-9c10-4e70-80e6-29385127d7d7\") " pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116719 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0fe6e645-a059-4073-ad43-ecd9a8bdac14-trusted-ca\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116815 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1afda1fa-7adc-4d82-bd34-5743e3b89b52-serving-cert\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116900 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tss6v\" (UniqueName: \"kubernetes.io/projected/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-kube-api-access-tss6v\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116998 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa929eb-e746-4253-9cf6-dcb0939da532-config\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117173 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-oauth-config\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117338 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-etcd-client\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117407 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-image-import-ca\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117470 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ca380f6-b02f-4128-86f6-ff19d22c532d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117547 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0fe6e645-a059-4073-ad43-ecd9a8bdac14-serving-cert\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117615 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-service-ca\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117691 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1afda1fa-7adc-4d82-bd34-5743e3b89b52-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117829 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4fa929eb-e746-4253-9cf6-dcb0939da532-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117935 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118017 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4fa929eb-e746-4253-9cf6-dcb0939da532-images\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118097 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-encryption-config\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118171 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/244b24d8-4734-4c9f-8ba3-ef7616581a58-audit-dir\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118234 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec9e32e6-39f8-4156-be3b-cda6046c017a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118357 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-etcd-serving-ca\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118491 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-config\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118578 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-trusted-ca-bundle\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118692 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe6e645-a059-4073-ad43-ecd9a8bdac14-config\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " 
pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118809 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/487b5d8d-0509-4510-8331-f36ac1bfe1a9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vfjn4\" (UID: \"487b5d8d-0509-4510-8331-f36ac1bfe1a9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.112874 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.111329 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.112939 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113001 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113386 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113458 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113558 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113626 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.119972 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113679 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.113712 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.115748 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.115922 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116217 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.120243 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116343 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116628 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116674 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.116981 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117193 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117346 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117645 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117863 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.117963 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118035 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118159 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118197 4848 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118233 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.118402 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.119030 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.119908 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.127024 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qrnzf"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.127980 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.137308 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.137551 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.137700 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.140419 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.140680 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.143305 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.145357 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.145539 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.179409 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.182728 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.184706 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q4jxp"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.185127 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: 
I0128 12:48:50.195362 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.197375 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.197788 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.197991 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.198194 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.201718 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.202279 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.202522 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.202774 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.202984 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.207385 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.209651 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.223530 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.223604 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.223649 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-oauth-config\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.223757 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-544kr\" (UniqueName: \"kubernetes.io/projected/6471a57b-f563-440e-9fa7-2c24af8039c9-kube-api-access-544kr\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.223827 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ca380f6-b02f-4128-86f6-ff19d22c532d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.223867 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-etcd-client\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224018 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-image-import-ca\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224070 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224109 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0fe6e645-a059-4073-ad43-ecd9a8bdac14-serving-cert\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 
12:48:50.224142 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-service-ca\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224171 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1afda1fa-7adc-4d82-bd34-5743e3b89b52-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224203 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1a85f330-84c9-416c-9fcc-8e775a19599d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224265 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-config\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224302 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cjdm\" (UniqueName: \"kubernetes.io/projected/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-kube-api-access-8cjdm\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224339 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4fa929eb-e746-4253-9cf6-dcb0939da532-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224390 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224428 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k7f2\" (UniqueName: \"kubernetes.io/projected/1a85f330-84c9-416c-9fcc-8e775a19599d-kube-api-access-6k7f2\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 
12:48:50.224461 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224490 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1a85f330-84c9-416c-9fcc-8e775a19599d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224526 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-encryption-config\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224559 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224588 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4fa929eb-e746-4253-9cf6-dcb0939da532-images\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224623 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4f3b423f-a356-4d55-bc15-c476dd03d771-auth-proxy-config\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224650 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224688 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/244b24d8-4734-4c9f-8ba3-ef7616581a58-audit-dir\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.224716 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec9e32e6-39f8-4156-be3b-cda6046c017a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225066 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-etcd-serving-ca\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225160 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-config\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225335 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/be55bab9-c006-41aa-9347-817555da5dcf-metrics-tls\") pod \"dns-operator-744455d44c-bcrg5\" (UID: \"be55bab9-c006-41aa-9347-817555da5dcf\") " pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225453 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-trusted-ca-bundle\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225489 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe6e645-a059-4073-ad43-ecd9a8bdac14-config\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225539 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/487b5d8d-0509-4510-8331-f36ac1bfe1a9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vfjn4\" (UID: \"487b5d8d-0509-4510-8331-f36ac1bfe1a9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225667 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225709 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-trusted-ca-bundle\") pod 
\"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225749 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225784 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tgqs\" (UniqueName: \"kubernetes.io/projected/4f3b423f-a356-4d55-bc15-c476dd03d771-kube-api-access-7tgqs\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225818 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lvs6\" (UniqueName: \"kubernetes.io/projected/244b24d8-4734-4c9f-8ba3-ef7616581a58-kube-api-access-8lvs6\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225859 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-oauth-serving-cert\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225892 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ace4935-baa0-4bdd-8ab6-f57415730959-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225942 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fca29e57-7eeb-48d7-a4d9-2125f65816f4-serving-cert\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.225984 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-audit\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226020 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4f3b423f-a356-4d55-bc15-c476dd03d771-machine-approver-tls\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226049 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226078 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-config\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226109 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-config\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226142 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6wgw\" (UniqueName: \"kubernetes.io/projected/be55bab9-c006-41aa-9347-817555da5dcf-kube-api-access-s6wgw\") pod \"dns-operator-744455d44c-bcrg5\" (UID: \"be55bab9-c006-41aa-9347-817555da5dcf\") " pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226180 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-config\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226207 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-service-ca-bundle\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226265 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-dir\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226298 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-client-ca\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226324 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csvmj\" (UniqueName: \"kubernetes.io/projected/4fa929eb-e746-4253-9cf6-dcb0939da532-kube-api-access-csvmj\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226350 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6471a57b-f563-440e-9fa7-2c24af8039c9-serving-cert\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226377 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca380f6-b02f-4128-86f6-ff19d22c532d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226401 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-serving-cert\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226430 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgxbg\" (UniqueName: \"kubernetes.io/projected/487b5d8d-0509-4510-8331-f36ac1bfe1a9-kube-api-access-hgxbg\") pod \"cluster-samples-operator-665b6dd947-vfjn4\" (UID: \"487b5d8d-0509-4510-8331-f36ac1bfe1a9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226491 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226529 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-serving-cert\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226577 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-trusted-ca-bundle\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226608 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-serving-cert\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226645 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec9e32e6-39f8-4156-be3b-cda6046c017a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226684 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5q52\" (UniqueName: \"kubernetes.io/projected/c3bd5c0e-2656-4237-a9ab-e4de84101595-kube-api-access-f5q52\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226713 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nf4k\" (UniqueName: \"kubernetes.io/projected/0fe6e645-a059-4073-ad43-ecd9a8bdac14-kube-api-access-7nf4k\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226752 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-policies\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226781 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgz7c\" (UniqueName: \"kubernetes.io/projected/fca29e57-7eeb-48d7-a4d9-2125f65816f4-kube-api-access-jgz7c\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226814 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/244b24d8-4734-4c9f-8ba3-ef7616581a58-node-pullsecrets\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226853 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226898 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrs45\" (UniqueName: \"kubernetes.io/projected/1afda1fa-7adc-4d82-bd34-5743e3b89b52-kube-api-access-hrs45\") pod 
\"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226949 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.226983 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227037 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgqqk\" (UniqueName: \"kubernetes.io/projected/2ca380f6-b02f-4128-86f6-ff19d22c532d-kube-api-access-rgqqk\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227058 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ace4935-baa0-4bdd-8ab6-f57415730959-config\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227083 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1a85f330-84c9-416c-9fcc-8e775a19599d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227115 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f3b423f-a356-4d55-bc15-c476dd03d771-config\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227139 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bll9z\" (UniqueName: \"kubernetes.io/projected/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-kube-api-access-bll9z\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227159 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227181 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec9e32e6-39f8-4156-be3b-cda6046c017a-config\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227216 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ace4935-baa0-4bdd-8ab6-f57415730959-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227240 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-client-ca\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227288 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vct7d\" (UniqueName: \"kubernetes.io/projected/db02dfdf-9c10-4e70-80e6-29385127d7d7-kube-api-access-vct7d\") pod \"downloads-7954f5f757-jltf4\" (UID: \"db02dfdf-9c10-4e70-80e6-29385127d7d7\") " pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227307 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0fe6e645-a059-4073-ad43-ecd9a8bdac14-trusted-ca\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227329 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tss6v\" (UniqueName: \"kubernetes.io/projected/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-kube-api-access-tss6v\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227356 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1afda1fa-7adc-4d82-bd34-5743e3b89b52-serving-cert\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227434 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.227454 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa929eb-e746-4253-9cf6-dcb0939da532-config\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.229682 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.230095 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-etcd-serving-ca\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.230950 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-config\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.236979 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.237330 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa929eb-e746-4253-9cf6-dcb0939da532-config\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.237507 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-oauth-serving-cert\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.237591 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-trusted-ca-bundle\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.237466 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.241229 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe6e645-a059-4073-ad43-ecd9a8bdac14-config\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " 
pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.242336 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fw7xc"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.247093 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.251117 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ca380f6-b02f-4128-86f6-ff19d22c532d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.251684 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-audit\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.251873 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-image-import-ca\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.252415 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244b24d8-4734-4c9f-8ba3-ef7616581a58-config\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.253326 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/487b5d8d-0509-4510-8331-f36ac1bfe1a9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vfjn4\" (UID: \"487b5d8d-0509-4510-8331-f36ac1bfe1a9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.253495 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.254374 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca380f6-b02f-4128-86f6-ff19d22c532d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.257468 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1afda1fa-7adc-4d82-bd34-5743e3b89b52-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc 
kubenswrapper[4848]: I0128 12:48:50.291006 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-config\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.291570 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ace4935-baa0-4bdd-8ab6-f57415730959-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.294277 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4fa929eb-e746-4253-9cf6-dcb0939da532-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.294641 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.297664 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.297756 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/244b24d8-4734-4c9f-8ba3-ef7616581a58-audit-dir\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.297929 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4fa929eb-e746-4253-9cf6-dcb0939da532-images\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.297951 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.298219 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.298228 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-service-ca\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.299185 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-trusted-ca-bundle\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.302077 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-serving-cert\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.302599 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.302633 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/244b24d8-4734-4c9f-8ba3-ef7616581a58-node-pullsecrets\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.303470 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec9e32e6-39f8-4156-be3b-cda6046c017a-config\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.303480 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ace4935-baa0-4bdd-8ab6-f57415730959-config\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.304009 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0fe6e645-a059-4073-ad43-ecd9a8bdac14-serving-cert\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.304488 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-encryption-config\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.304724 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec9e32e6-39f8-4156-be3b-cda6046c017a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.304835 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.304935 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-serving-cert\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.305058 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0fe6e645-a059-4073-ad43-ecd9a8bdac14-trusted-ca\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.305195 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fsgh8"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.305423 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-oauth-config\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.305854 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.305966 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.305862 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.306988 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/244b24d8-4734-4c9f-8ba3-ef7616581a58-etcd-client\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.307057 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.307310 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.307330 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-client-ca\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.307562 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-serving-cert\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.307752 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.314959 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.315761 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.318062 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-dx4hg"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.319178 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.319625 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.319776 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1afda1fa-7adc-4d82-bd34-5743e3b89b52-serving-cert\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.319932 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.319953 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.323244 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.323687 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lvs6\" (UniqueName: \"kubernetes.io/projected/244b24d8-4734-4c9f-8ba3-ef7616581a58-kube-api-access-8lvs6\") pod \"apiserver-76f77b778f-84b8w\" (UID: \"244b24d8-4734-4c9f-8ba3-ef7616581a58\") " pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.324211 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328048 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6471a57b-f563-440e-9fa7-2c24af8039c9-serving-cert\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328084 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-dir\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328092 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328119 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328174 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-policies\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328200 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328222 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgz7c\" (UniqueName: \"kubernetes.io/projected/fca29e57-7eeb-48d7-a4d9-2125f65816f4-kube-api-access-jgz7c\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328241 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328272 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-dir\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328289 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328346 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328401 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328943 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.328982 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1a85f330-84c9-416c-9fcc-8e775a19599d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.329028 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f3b423f-a356-4d55-bc15-c476dd03d771-config\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.329065 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-client-ca\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.329115 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bll9z\" (UniqueName: \"kubernetes.io/projected/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-kube-api-access-bll9z\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc 
kubenswrapper[4848]: I0128 12:48:50.329137 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.329211 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.329947 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.330006 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bhs7s"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.330762 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-client-ca\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.331606 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f3b423f-a356-4d55-bc15-c476dd03d771-config\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332434 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-544kr\" (UniqueName: \"kubernetes.io/projected/6471a57b-f563-440e-9fa7-2c24af8039c9-kube-api-access-544kr\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332529 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332564 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6471a57b-f563-440e-9fa7-2c24af8039c9-serving-cert\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332587 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332653 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-config\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332664 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332696 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cjdm\" (UniqueName: \"kubernetes.io/projected/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-kube-api-access-8cjdm\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332725 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1a85f330-84c9-416c-9fcc-8e775a19599d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332761 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332793 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k7f2\" (UniqueName: \"kubernetes.io/projected/1a85f330-84c9-416c-9fcc-8e775a19599d-kube-api-access-6k7f2\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.333035 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-djhmv"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.333107 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-policies\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.333236 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.333898 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.334119 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.337620 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.332318 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.338500 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1a85f330-84c9-416c-9fcc-8e775a19599d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.338723 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.338816 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1a85f330-84c9-416c-9fcc-8e775a19599d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.338873 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4f3b423f-a356-4d55-bc15-c476dd03d771-auth-proxy-config\") pod 
\"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.338921 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.338968 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/be55bab9-c006-41aa-9347-817555da5dcf-metrics-tls\") pod \"dns-operator-744455d44c-bcrg5\" (UID: \"be55bab9-c006-41aa-9347-817555da5dcf\") " pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339029 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339055 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339078 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339111 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tgqs\" (UniqueName: \"kubernetes.io/projected/4f3b423f-a356-4d55-bc15-c476dd03d771-kube-api-access-7tgqs\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339152 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fca29e57-7eeb-48d7-a4d9-2125f65816f4-serving-cert\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339178 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4f3b423f-a356-4d55-bc15-c476dd03d771-machine-approver-tls\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339208 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339272 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6wgw\" (UniqueName: \"kubernetes.io/projected/be55bab9-c006-41aa-9347-817555da5dcf-kube-api-access-s6wgw\") pod \"dns-operator-744455d44c-bcrg5\" (UID: \"be55bab9-c006-41aa-9347-817555da5dcf\") " pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339309 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-config\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339331 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-service-ca-bundle\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.339698 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.340677 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-service-ca-bundle\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.341748 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-config\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.342910 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4f3b423f-a356-4d55-bc15-c476dd03d771-auth-proxy-config\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 
12:48:50.342990 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.343107 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.343806 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.343860 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/be55bab9-c006-41aa-9347-817555da5dcf-metrics-tls\") pod \"dns-operator-744455d44c-bcrg5\" (UID: \"be55bab9-c006-41aa-9347-817555da5dcf\") " pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.344225 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.344373 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.344623 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.344841 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.345282 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.345463 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.345930 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.346082 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca29e57-7eeb-48d7-a4d9-2125f65816f4-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.346531 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.346684 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.346935 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.347008 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlgtd"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.347992 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.348363 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.350140 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.350691 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.351114 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.351156 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-config\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.352162 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.352937 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.353944 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.354418 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5dbnv"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.355137 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.355769 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4f3b423f-a356-4d55-bc15-c476dd03d771-machine-approver-tls\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.356609 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fca29e57-7eeb-48d7-a4d9-2125f65816f4-serving-cert\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.357840 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.357985 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-84b8w"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.358735 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec9e32e6-39f8-4156-be3b-cda6046c017a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-vvb9q\" (UID: \"ec9e32e6-39f8-4156-be3b-cda6046c017a\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.359063 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-9xs2f"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.362076 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1a85f330-84c9-416c-9fcc-8e775a19599d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.363910 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.365522 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.371202 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zwpd8"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.378664 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.382461 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.385276 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fdf5k"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.386889 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.388429 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.390665 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.393353 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-jltf4"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.395054 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.396664 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.397334 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.398715 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.400219 4848 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fsgh8"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.402161 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-zdq5h"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.403071 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.405915 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.407149 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q4jxp"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.408382 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.409426 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.409561 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.410511 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-dx4hg"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.412002 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.412676 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.414477 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qrnzf"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.416152 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bhs7s"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.418881 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dkwtc"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.421602 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.421640 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fw7xc"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.421772 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.422114 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.422676 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.423773 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.425275 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.427234 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bcrg5"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.429218 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.429920 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.431666 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlgtd"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.433876 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dkwtc"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.435019 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.436155 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.437273 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-wgv95"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.437846 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.438495 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-9k5dc"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.439327 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.439492 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-wgv95"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.439576 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.440535 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9k5dc"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.441061 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.441291 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.441377 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.457965 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.478296 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.485820 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.498070 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.536705 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csvmj\" (UniqueName: \"kubernetes.io/projected/4fa929eb-e746-4253-9cf6-dcb0939da532-kube-api-access-csvmj\") pod \"machine-api-operator-5694c8668f-zdq5h\" (UID: \"4fa929eb-e746-4253-9cf6-dcb0939da532\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.558131 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ace4935-baa0-4bdd-8ab6-f57415730959-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wj7x7\" (UID: \"2ace4935-baa0-4bdd-8ab6-f57415730959\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.589390 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vct7d\" (UniqueName: \"kubernetes.io/projected/db02dfdf-9c10-4e70-80e6-29385127d7d7-kube-api-access-vct7d\") pod \"downloads-7954f5f757-jltf4\" (UID: \"db02dfdf-9c10-4e70-80e6-29385127d7d7\") " pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.602419 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tss6v\" (UniqueName: \"kubernetes.io/projected/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-kube-api-access-tss6v\") pod \"controller-manager-879f6c89f-zwpd8\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.611700 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.614613 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrs45\" (UniqueName: \"kubernetes.io/projected/1afda1fa-7adc-4d82-bd34-5743e3b89b52-kube-api-access-hrs45\") pod \"openshift-config-operator-7777fb866f-fh6qn\" (UID: \"1afda1fa-7adc-4d82-bd34-5743e3b89b52\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.627708 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.649339 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgxbg\" (UniqueName: \"kubernetes.io/projected/487b5d8d-0509-4510-8331-f36ac1bfe1a9-kube-api-access-hgxbg\") pod \"cluster-samples-operator-665b6dd947-vfjn4\" (UID: \"487b5d8d-0509-4510-8331-f36ac1bfe1a9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.656228 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.661572 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5q52\" (UniqueName: \"kubernetes.io/projected/c3bd5c0e-2656-4237-a9ab-e4de84101595-kube-api-access-f5q52\") pod \"console-f9d7485db-5dbnv\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.674325 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.676111 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nf4k\" (UniqueName: \"kubernetes.io/projected/0fe6e645-a059-4073-ad43-ecd9a8bdac14-kube-api-access-7nf4k\") pod \"console-operator-58897d9998-fdf5k\" (UID: \"0fe6e645-a059-4073-ad43-ecd9a8bdac14\") " pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.694384 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.697795 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.698266 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgqqk\" (UniqueName: \"kubernetes.io/projected/2ca380f6-b02f-4128-86f6-ff19d22c532d-kube-api-access-rgqqk\") pod \"openshift-controller-manager-operator-756b6f6bc6-pslnn\" (UID: \"2ca380f6-b02f-4128-86f6-ff19d22c532d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.720370 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.745718 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.749098 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-84b8w"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.751602 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.757118 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.769931 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q"] Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.790881 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.797763 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.821703 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.839846 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.845644 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.859438 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.878459 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.898013 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.919991 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.942097 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.960521 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.967690 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.977407 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 12:48:50 crc kubenswrapper[4848]: I0128 12:48:50.978725 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.000188 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.018002 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.038011 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.056737 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.058775 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-zdq5h"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.060995 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.094731 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.102107 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.118712 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.122746 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.144476 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.163595 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.182497 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.198663 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.220079 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 12:48:51 crc 
kubenswrapper[4848]: I0128 12:48:51.238849 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.257558 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.277455 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.298225 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zwpd8"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.321767 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.335438 4848 request.go:700] Waited for 1.010826601s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-controller-dockercfg-c2lfx&limit=500&resourceVersion=0 Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.343482 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.378849 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5dbnv"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.383779 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.384663 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-jltf4"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.393099 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.404788 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.407116 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgz7c\" (UniqueName: \"kubernetes.io/projected/fca29e57-7eeb-48d7-a4d9-2125f65816f4-kube-api-access-jgz7c\") pod \"authentication-operator-69f744f599-q4jxp\" (UID: \"fca29e57-7eeb-48d7-a4d9-2125f65816f4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.420088 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.441806 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: W0128 12:48:51.443533 4848 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb02dfdf_9c10_4e70_80e6_29385127d7d7.slice/crio-5a5bdcd3982af5e42d8bc271f58ca701ec451bbb3344c67abe5aaf8dd247ce3b WatchSource:0}: Error finding container 5a5bdcd3982af5e42d8bc271f58ca701ec451bbb3344c67abe5aaf8dd247ce3b: Status 404 returned error can't find the container with id 5a5bdcd3982af5e42d8bc271f58ca701ec451bbb3344c67abe5aaf8dd247ce3b Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.454442 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fdf5k"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.454501 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7"] Jan 28 12:48:51 crc kubenswrapper[4848]: W0128 12:48:51.458110 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1afda1fa_7adc_4d82_bd34_5743e3b89b52.slice/crio-9b24fea99a93877aef2161291552039766960cf13700566ca1a839f1bbc3b0d8 WatchSource:0}: Error finding container 9b24fea99a93877aef2161291552039766960cf13700566ca1a839f1bbc3b0d8: Status 404 returned error can't find the container with id 9b24fea99a93877aef2161291552039766960cf13700566ca1a839f1bbc3b0d8 Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.470238 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.491670 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bll9z\" (UniqueName: \"kubernetes.io/projected/16e0f86b-6544-44ba-b415-0cb5a6c5a0d7-kube-api-access-bll9z\") pod \"openshift-apiserver-operator-796bbdcf4f-f4jt4\" (UID: \"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.509045 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-544kr\" (UniqueName: \"kubernetes.io/projected/6471a57b-f563-440e-9fa7-2c24af8039c9-kube-api-access-544kr\") pod \"route-controller-manager-6576b87f9c-j4xw5\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.518306 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cjdm\" (UniqueName: \"kubernetes.io/projected/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-kube-api-access-8cjdm\") pod \"oauth-openshift-558db77b4-qrnzf\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.537592 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1a85f330-84c9-416c-9fcc-8e775a19599d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.561957 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 
12:48:51.577565 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k7f2\" (UniqueName: \"kubernetes.io/projected/1a85f330-84c9-416c-9fcc-8e775a19599d-kube-api-access-6k7f2\") pod \"cluster-image-registry-operator-dc59b4c8b-8tnwl\" (UID: \"1a85f330-84c9-416c-9fcc-8e775a19599d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.577862 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.597914 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.601448 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.617513 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.637325 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.637354 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.652513 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.658321 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.659733 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.676767 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.678328 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.697999 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.717671 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.739107 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.748157 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" event={"ID":"ec9e32e6-39f8-4156-be3b-cda6046c017a","Type":"ContainerStarted","Data":"c1a9a8c1850466cb0eb3087e620f1a936c6c2bfe17e6ff9dcebbc465fe04c15d"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.748227 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" event={"ID":"ec9e32e6-39f8-4156-be3b-cda6046c017a","Type":"ContainerStarted","Data":"1228321fa4f5a49d141e7ccc3d3ddd68c1a1b78b78524c865e6e078e33990e4c"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.758534 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.762940 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" event={"ID":"1afda1fa-7adc-4d82-bd34-5743e3b89b52","Type":"ContainerStarted","Data":"d9940b885f8a4084da5e70832f0b29006a74858e20a02dc38f7903474a98f733"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.763011 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" event={"ID":"1afda1fa-7adc-4d82-bd34-5743e3b89b52","Type":"ContainerStarted","Data":"9b24fea99a93877aef2161291552039766960cf13700566ca1a839f1bbc3b0d8"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.764922 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" event={"ID":"487b5d8d-0509-4510-8331-f36ac1bfe1a9","Type":"ContainerStarted","Data":"79d0ac066fad542e2e353e1ee01eef37a6febfd76f95c90400123101fbfadd1b"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.764948 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" event={"ID":"487b5d8d-0509-4510-8331-f36ac1bfe1a9","Type":"ContainerStarted","Data":"23236508041fa9192121c9de7c1dd2fcdd7081ea32719b5f3bf0236e668b0eab"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.764962 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" event={"ID":"487b5d8d-0509-4510-8331-f36ac1bfe1a9","Type":"ContainerStarted","Data":"2d4a73b79ea737aaad02195162d1eac133cbb1283ddd6f631250edb08f5b31a9"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.766825 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" event={"ID":"2ace4935-baa0-4bdd-8ab6-f57415730959","Type":"ContainerStarted","Data":"44f9810a151609b8b6950d2e5e76360a0d24774c6a7f58deaf7fdf267420a887"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.768867 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" event={"ID":"2ca380f6-b02f-4128-86f6-ff19d22c532d","Type":"ContainerStarted","Data":"b943af623c5e2830296487f4c59d550fd9099f1bff7b0cc1966c47e7cff8dd32"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.771151 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5dbnv" event={"ID":"c3bd5c0e-2656-4237-a9ab-e4de84101595","Type":"ContainerStarted","Data":"67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.771184 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5dbnv" event={"ID":"c3bd5c0e-2656-4237-a9ab-e4de84101595","Type":"ContainerStarted","Data":"3f260dcacec5987142fccfdcca394c9fa6390b46bc7ef4e9346fbff371371653"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.773451 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-jltf4" event={"ID":"db02dfdf-9c10-4e70-80e6-29385127d7d7","Type":"ContainerStarted","Data":"994c03b14253c3f1e14434c74f2a3dcf164a1baba1dcbc55fc8a96f7dc087caa"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.773507 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-jltf4" event={"ID":"db02dfdf-9c10-4e70-80e6-29385127d7d7","Type":"ContainerStarted","Data":"5a5bdcd3982af5e42d8bc271f58ca701ec451bbb3344c67abe5aaf8dd247ce3b"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.784897 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" event={"ID":"365c6d6e-80a0-4818-b1dc-093bddc9a5a4","Type":"ContainerStarted","Data":"09823452889d46ebb3edea438265776604c65d6a20e04d9b1d5850799c5eaa06"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.784949 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" event={"ID":"365c6d6e-80a0-4818-b1dc-093bddc9a5a4","Type":"ContainerStarted","Data":"5a8903b915dd723dce5b33e2f73d115a6dd5dc0017b5f955aab10852601e810c"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.785565 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.786331 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.796477 4848 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zwpd8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.796562 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" 
containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.807766 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" event={"ID":"4fa929eb-e746-4253-9cf6-dcb0939da532","Type":"ContainerStarted","Data":"c949f9101042ab74c71eaea6942e1428f81e4d114c0c1de17d573dc70c49769b"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.807821 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" event={"ID":"4fa929eb-e746-4253-9cf6-dcb0939da532","Type":"ContainerStarted","Data":"46c24488ec7b89bdad7d8b64eed435a5c98cee745d4fb90c7928db6a673cfabe"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.807830 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" event={"ID":"4fa929eb-e746-4253-9cf6-dcb0939da532","Type":"ContainerStarted","Data":"7f375ecffd42f76dbf2ee1df410884e47529e4b9f521d14d542ae63940eecf0d"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.835845 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tgqs\" (UniqueName: \"kubernetes.io/projected/4f3b423f-a356-4d55-bc15-c476dd03d771-kube-api-access-7tgqs\") pod \"machine-approver-56656f9798-rg97d\" (UID: \"4f3b423f-a356-4d55-bc15-c476dd03d771\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.838175 4848 generic.go:334] "Generic (PLEG): container finished" podID="244b24d8-4734-4c9f-8ba3-ef7616581a58" containerID="60263775f4fee8bd0118c51eeea4b4b1b3ea31d0b9c729d793243fda09b47300" exitCode=0 Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.838299 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" event={"ID":"244b24d8-4734-4c9f-8ba3-ef7616581a58","Type":"ContainerDied","Data":"60263775f4fee8bd0118c51eeea4b4b1b3ea31d0b9c729d793243fda09b47300"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.838343 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" event={"ID":"244b24d8-4734-4c9f-8ba3-ef7616581a58","Type":"ContainerStarted","Data":"757b480347b1da21aaaa6572087a06de52356ce4abea4fe78b757d6486a729a8"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.854402 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.858629 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.858757 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" event={"ID":"0fe6e645-a059-4073-ad43-ecd9a8bdac14","Type":"ContainerStarted","Data":"21fbded366933e7c26309cc84b5026859f40bee07e1e62442e883326bbec026f"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.858815 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" 
event={"ID":"0fe6e645-a059-4073-ad43-ecd9a8bdac14","Type":"ContainerStarted","Data":"f897c21846bcdc98890bddd5f296555eb4cc3e245a5eeb5e6a69de4c8bd95608"} Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.867516 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6wgw\" (UniqueName: \"kubernetes.io/projected/be55bab9-c006-41aa-9347-817555da5dcf-kube-api-access-s6wgw\") pod \"dns-operator-744455d44c-bcrg5\" (UID: \"be55bab9-c006-41aa-9347-817555da5dcf\") " pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.868316 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.878199 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.889754 4848 patch_prober.go:28] interesting pod/console-operator-58897d9998-fdf5k container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.889882 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" podUID="0fe6e645-a059-4073-ad43-ecd9a8bdac14" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.901586 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.913884 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5"] Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.919136 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.929237 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.959135 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.959819 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.975523 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.982362 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 12:48:51 crc kubenswrapper[4848]: I0128 12:48:51.997758 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.036501 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.045535 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.066443 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.078983 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.097686 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.119048 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.128886 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4"] Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.137144 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.158365 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.178851 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.201824 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.220786 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.221272 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl"] Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.240892 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 28 12:48:52 crc kubenswrapper[4848]: W0128 12:48:52.274526 4848 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a85f330_84c9_416c_9fcc_8e775a19599d.slice/crio-8602f1e327d26d379d346cc4af591817ab423c7fa94904be1663a278fd4149c1 WatchSource:0}: Error finding container 8602f1e327d26d379d346cc4af591817ab423c7fa94904be1663a278fd4149c1: Status 404 returned error can't find the container with id 8602f1e327d26d379d346cc4af591817ab423c7fa94904be1663a278fd4149c1 Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.275196 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q4jxp"] Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.277646 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.277946 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.297458 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.332679 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.336536 4848 request.go:700] Waited for 1.971815449s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-dockercfg-qx5rd&limit=500&resourceVersion=0 Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.339765 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.357797 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: W0128 12:48:52.359995 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfca29e57_7eeb_48d7_a4d9_2125f65816f4.slice/crio-4419fdd1db18f47562c27d426f8129051b3f95abed376d1726e30a133fbd80fa WatchSource:0}: Error finding container 4419fdd1db18f47562c27d426f8129051b3f95abed376d1726e30a133fbd80fa: Status 404 returned error can't find the container with id 4419fdd1db18f47562c27d426f8129051b3f95abed376d1726e30a133fbd80fa Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.378544 4848 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.380597 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qrnzf"] Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.398960 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.418344 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.454182 4848 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.467165 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.483020 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.500306 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.524765 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.536585 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bcrg5"] Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.538643 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 12:48:52 crc kubenswrapper[4848]: W0128 12:48:52.594873 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe55bab9_c006_41aa_9347_817555da5dcf.slice/crio-291e2932587b0c9326655af6a27e7a8517967db621add61ff2fa333dfd1531b5 WatchSource:0}: Error finding container 291e2932587b0c9326655af6a27e7a8517967db621add61ff2fa333dfd1531b5: Status 404 returned error can't find the container with id 291e2932587b0c9326655af6a27e7a8517967db621add61ff2fa333dfd1531b5 Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.595076 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c4f39e-68b1-4abb-9e40-553abbce5d0f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-22cds\" (UID: \"e9c4f39e-68b1-4abb-9e40-553abbce5d0f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.603267 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.603473 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1929eb16-0432-46a9-871d-3a2d75f37d7a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.603552 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: E0128 12:48:52.605570 4848 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.105549544 +0000 UTC m=+160.017766652 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.606637 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-etcd-client\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.607543 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-trusted-ca\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.608383 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-bound-sa-token\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.608434 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-metrics-tls\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.608457 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.608564 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-trusted-ca\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.608604 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1929eb16-0432-46a9-871d-3a2d75f37d7a-installation-pull-secrets\") pod 
\"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.610908 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbpqh\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-kube-api-access-pbpqh\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.610951 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-audit-policies\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.613081 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlphw\" (UniqueName: \"kubernetes.io/projected/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-kube-api-access-dlphw\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.613218 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-tls\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.613391 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-certificates\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.613468 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-serving-cert\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.641714 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.714244 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.714442 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.714476 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stcmv\" (UniqueName: \"kubernetes.io/projected/8e60e303-4483-44cb-b22f-f2cce7f9882d-kube-api-access-stcmv\") pod \"multus-admission-controller-857f4d67dd-dx4hg\" (UID: \"8e60e303-4483-44cb-b22f-f2cce7f9882d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718310 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4fvt\" (UniqueName: \"kubernetes.io/projected/33884de2-1133-47f6-b1c3-e49151ad1b54-kube-api-access-g4fvt\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718384 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq6bm\" (UniqueName: \"kubernetes.io/projected/2d8c0888-72ad-42f4-993a-47ffb613e406-kube-api-access-pq6bm\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718445 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-registration-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718473 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1929eb16-0432-46a9-871d-3a2d75f37d7a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718580 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f20bd200-d99d-423d-b650-9095869b9a8b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718607 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f0bcbb8b-0e11-45fc-a632-2d869e47c651-tmpfs\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718628 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9e228a32-b6c0-4796-9e16-ad8fce227175-signing-cabundle\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718651 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f20bd200-d99d-423d-b650-9095869b9a8b-images\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718700 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-metrics-tls\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718734 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-service-ca-bundle\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718767 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f0bcbb8b-0e11-45fc-a632-2d869e47c651-apiservice-cert\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718795 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7tqv\" (UniqueName: \"kubernetes.io/projected/acf04d74-cef0-4425-9599-bbaf3fdf0374-kube-api-access-d7tqv\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718818 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvrrx\" (UniqueName: \"kubernetes.io/projected/67e8427e-1a00-4ee0-a364-badb08b1cd8d-kube-api-access-mvrrx\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " 
pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718861 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-audit-policies\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.718917 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c290ae4-345a-4877-b020-2d64197e1bf2-config-volume\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720528 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2dff4bcb-d09c-4094-8cdf-35f0e7e51176-cert\") pod \"ingress-canary-9k5dc\" (UID: \"2dff4bcb-d09c-4094-8cdf-35f0e7e51176\") " pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720641 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/33884de2-1133-47f6-b1c3-e49151ad1b54-srv-cert\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720682 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acf04d74-cef0-4425-9599-bbaf3fdf0374-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720713 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-config\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720769 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720799 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdw7n\" (UniqueName: \"kubernetes.io/projected/f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4-kube-api-access-sdw7n\") pod \"package-server-manager-789f6589d5-jvwtg\" (UID: \"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720858 
4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8xz4\" (UniqueName: \"kubernetes.io/projected/f0bcbb8b-0e11-45fc-a632-2d869e47c651-kube-api-access-m8xz4\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720879 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbzfx\" (UniqueName: \"kubernetes.io/projected/970fc81c-bb86-43f0-a598-067a8d9febe9-kube-api-access-mbzfx\") pod \"migrator-59844c95c7-rtg68\" (UID: \"970fc81c-bb86-43f0-a598-067a8d9febe9\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720901 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f20bd200-d99d-423d-b650-9095869b9a8b-proxy-tls\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720924 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9417b0a9-6016-419f-bf13-94812e88ca91-serving-cert\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.720971 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-certificates\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721254 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721302 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8e60e303-4483-44cb-b22f-f2cce7f9882d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-dx4hg\" (UID: \"8e60e303-4483-44cb-b22f-f2cce7f9882d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721331 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7dnl\" (UniqueName: \"kubernetes.io/projected/2dff4bcb-d09c-4094-8cdf-35f0e7e51176-kube-api-access-d7dnl\") pod \"ingress-canary-9k5dc\" (UID: \"2dff4bcb-d09c-4094-8cdf-35f0e7e51176\") " pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721355 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721389 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-default-certificate\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721412 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86gr6\" (UniqueName: \"kubernetes.io/projected/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-kube-api-access-86gr6\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721438 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acf04d74-cef0-4425-9599-bbaf3fdf0374-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721458 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-socket-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721476 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-mountpoint-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721509 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a78c59e1-105e-4581-a0bb-27c1d78dbdee-secret-volume\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721547 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pfwp\" (UniqueName: \"kubernetes.io/projected/f20bd200-d99d-423d-b650-9095869b9a8b-kube-api-access-9pfwp\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721566 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/33884de2-1133-47f6-b1c3-e49151ad1b54-profile-collector-cert\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721583 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-proxy-tls\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721644 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-service-ca\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721666 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9e228a32-b6c0-4796-9e16-ad8fce227175-signing-key\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721752 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmb8v\" (UniqueName: \"kubernetes.io/projected/227649b8-fbe1-4426-a63b-1a4f13700bba-kube-api-access-nmb8v\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721785 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f0bcbb8b-0e11-45fc-a632-2d869e47c651-webhook-cert\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721803 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-csi-data-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721845 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/227649b8-fbe1-4426-a63b-1a4f13700bba-srv-cert\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721891 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-etcd-client\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721936 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-trusted-ca\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: E0128 12:48:52.721993 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.221958039 +0000 UTC m=+160.134175077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722056 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8crr\" (UniqueName: \"kubernetes.io/projected/3daae941-7347-4673-8fef-20c2785a8cd6-kube-api-access-l8crr\") pod \"control-plane-machine-set-operator-78cbb6b69f-p4g9c\" (UID: \"3daae941-7347-4673-8fef-20c2785a8cd6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722099 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-metrics-certs\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722171 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-bound-sa-token\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722194 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722213 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fcjr\" (UniqueName: \"kubernetes.io/projected/265f5d9c-2988-4a0b-9353-b426422d4c48-kube-api-access-4fcjr\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: 
\"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722270 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-trusted-ca\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722304 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz6c5\" (UniqueName: \"kubernetes.io/projected/9004f250-91b4-440a-b7a1-60b81c8070fa-kube-api-access-lz6c5\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722339 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1929eb16-0432-46a9-871d-3a2d75f37d7a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722523 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c290ae4-345a-4877-b020-2d64197e1bf2-metrics-tls\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722589 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbpqh\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-kube-api-access-pbpqh\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722612 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-stats-auth\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722636 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plnqm\" (UniqueName: \"kubernetes.io/projected/9417b0a9-6016-419f-bf13-94812e88ca91-kube-api-access-plnqm\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722662 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqsmz\" (UniqueName: \"kubernetes.io/projected/9e228a32-b6c0-4796-9e16-ad8fce227175-kube-api-access-zqsmz\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722687 4848 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j8zg\" (UniqueName: \"kubernetes.io/projected/3c290ae4-345a-4877-b020-2d64197e1bf2-kube-api-access-5j8zg\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722723 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3daae941-7347-4673-8fef-20c2785a8cd6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-p4g9c\" (UID: \"3daae941-7347-4673-8fef-20c2785a8cd6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722776 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a78c59e1-105e-4581-a0bb-27c1d78dbdee-config-volume\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722809 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/227649b8-fbe1-4426-a63b-1a4f13700bba-profile-collector-cert\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.722932 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9417b0a9-6016-419f-bf13-94812e88ca91-config\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.723837 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-client\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.723868 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/2d8c0888-72ad-42f4-993a-47ffb613e406-certs\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.723888 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vk86\" (UniqueName: \"kubernetes.io/projected/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-kube-api-access-7vk86\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.724165 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-trusted-ca\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.721144 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.726771 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.727342 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1929eb16-0432-46a9-871d-3a2d75f37d7a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.728544 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-trusted-ca\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.728638 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9004f250-91b4-440a-b7a1-60b81c8070fa-serving-cert\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.728709 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/2d8c0888-72ad-42f4-993a-47ffb613e406-node-bootstrap-token\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729431 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-audit-policies\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729513 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-ca\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729550 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729613 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlphw\" (UniqueName: \"kubernetes.io/projected/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-kube-api-access-dlphw\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729754 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-tls\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729861 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjj2r\" (UniqueName: \"kubernetes.io/projected/a78c59e1-105e-4581-a0bb-27c1d78dbdee-kube-api-access-wjj2r\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729902 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-encryption-config\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729933 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-plugins-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.729990 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jvwtg\" (UID: \"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.731153 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/265f5d9c-2988-4a0b-9353-b426422d4c48-audit-dir\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.731264 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-serving-cert\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.731357 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fxm2\" (UniqueName: \"kubernetes.io/projected/0d553491-aa2a-495d-b02c-73a52d29278b-kube-api-access-4fxm2\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.731611 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-certificates\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.741462 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-etcd-client\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.742919 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-tls\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.743745 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-serving-cert\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.747308 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-metrics-tls\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.752843 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1929eb16-0432-46a9-871d-3a2d75f37d7a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.755396 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-bound-sa-token\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.785164 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbpqh\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-kube-api-access-pbpqh\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.795979 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.819088 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlphw\" (UniqueName: \"kubernetes.io/projected/dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390-kube-api-access-dlphw\") pod \"ingress-operator-5b745b69d9-xh78v\" (UID: \"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833680 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz6c5\" (UniqueName: \"kubernetes.io/projected/9004f250-91b4-440a-b7a1-60b81c8070fa-kube-api-access-lz6c5\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833745 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-stats-auth\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833771 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plnqm\" (UniqueName: \"kubernetes.io/projected/9417b0a9-6016-419f-bf13-94812e88ca91-kube-api-access-plnqm\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833797 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c290ae4-345a-4877-b020-2d64197e1bf2-metrics-tls\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833823 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqsmz\" (UniqueName: \"kubernetes.io/projected/9e228a32-b6c0-4796-9e16-ad8fce227175-kube-api-access-zqsmz\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833848 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j8zg\" (UniqueName: \"kubernetes.io/projected/3c290ae4-345a-4877-b020-2d64197e1bf2-kube-api-access-5j8zg\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " 
pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833876 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3daae941-7347-4673-8fef-20c2785a8cd6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-p4g9c\" (UID: \"3daae941-7347-4673-8fef-20c2785a8cd6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833905 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a78c59e1-105e-4581-a0bb-27c1d78dbdee-config-volume\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833941 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/227649b8-fbe1-4426-a63b-1a4f13700bba-profile-collector-cert\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833960 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9417b0a9-6016-419f-bf13-94812e88ca91-config\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833975 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9004f250-91b4-440a-b7a1-60b81c8070fa-serving-cert\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.833990 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-client\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834008 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/2d8c0888-72ad-42f4-993a-47ffb613e406-certs\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834025 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vk86\" (UniqueName: \"kubernetes.io/projected/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-kube-api-access-7vk86\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834807 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/2d8c0888-72ad-42f4-993a-47ffb613e406-node-bootstrap-token\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834842 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-ca\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834868 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834907 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjj2r\" (UniqueName: \"kubernetes.io/projected/a78c59e1-105e-4581-a0bb-27c1d78dbdee-kube-api-access-wjj2r\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.834938 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-encryption-config\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835075 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-plugins-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835103 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/265f5d9c-2988-4a0b-9353-b426422d4c48-audit-dir\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835130 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jvwtg\" (UID: \"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835159 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fxm2\" (UniqueName: \"kubernetes.io/projected/0d553491-aa2a-495d-b02c-73a52d29278b-kube-api-access-4fxm2\") pod 
\"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835190 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stcmv\" (UniqueName: \"kubernetes.io/projected/8e60e303-4483-44cb-b22f-f2cce7f9882d-kube-api-access-stcmv\") pod \"multus-admission-controller-857f4d67dd-dx4hg\" (UID: \"8e60e303-4483-44cb-b22f-f2cce7f9882d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835301 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-registration-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835331 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4fvt\" (UniqueName: \"kubernetes.io/projected/33884de2-1133-47f6-b1c3-e49151ad1b54-kube-api-access-g4fvt\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835355 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq6bm\" (UniqueName: \"kubernetes.io/projected/2d8c0888-72ad-42f4-993a-47ffb613e406-kube-api-access-pq6bm\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835389 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f20bd200-d99d-423d-b650-9095869b9a8b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835413 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9e228a32-b6c0-4796-9e16-ad8fce227175-signing-cabundle\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835436 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f0bcbb8b-0e11-45fc-a632-2d869e47c651-tmpfs\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835461 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f20bd200-d99d-423d-b650-9095869b9a8b-images\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc 
kubenswrapper[4848]: I0128 12:48:52.835486 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f0bcbb8b-0e11-45fc-a632-2d869e47c651-apiservice-cert\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835511 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-service-ca-bundle\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835539 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7tqv\" (UniqueName: \"kubernetes.io/projected/acf04d74-cef0-4425-9599-bbaf3fdf0374-kube-api-access-d7tqv\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835563 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvrrx\" (UniqueName: \"kubernetes.io/projected/67e8427e-1a00-4ee0-a364-badb08b1cd8d-kube-api-access-mvrrx\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835588 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c290ae4-345a-4877-b020-2d64197e1bf2-config-volume\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835638 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2dff4bcb-d09c-4094-8cdf-35f0e7e51176-cert\") pod \"ingress-canary-9k5dc\" (UID: \"2dff4bcb-d09c-4094-8cdf-35f0e7e51176\") " pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835662 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/33884de2-1133-47f6-b1c3-e49151ad1b54-srv-cert\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835689 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acf04d74-cef0-4425-9599-bbaf3fdf0374-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835709 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-config\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835727 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdw7n\" (UniqueName: \"kubernetes.io/projected/f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4-kube-api-access-sdw7n\") pod \"package-server-manager-789f6589d5-jvwtg\" (UID: \"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835744 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835765 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8xz4\" (UniqueName: \"kubernetes.io/projected/f0bcbb8b-0e11-45fc-a632-2d869e47c651-kube-api-access-m8xz4\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835783 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835802 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbzfx\" (UniqueName: \"kubernetes.io/projected/970fc81c-bb86-43f0-a598-067a8d9febe9-kube-api-access-mbzfx\") pod \"migrator-59844c95c7-rtg68\" (UID: \"970fc81c-bb86-43f0-a598-067a8d9febe9\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835818 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f20bd200-d99d-423d-b650-9095869b9a8b-proxy-tls\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835836 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9417b0a9-6016-419f-bf13-94812e88ca91-serving-cert\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835855 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8e60e303-4483-44cb-b22f-f2cce7f9882d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-dx4hg\" (UID: 
\"8e60e303-4483-44cb-b22f-f2cce7f9882d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835871 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7dnl\" (UniqueName: \"kubernetes.io/projected/2dff4bcb-d09c-4094-8cdf-35f0e7e51176-kube-api-access-d7dnl\") pod \"ingress-canary-9k5dc\" (UID: \"2dff4bcb-d09c-4094-8cdf-35f0e7e51176\") " pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835887 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86gr6\" (UniqueName: \"kubernetes.io/projected/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-kube-api-access-86gr6\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835904 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acf04d74-cef0-4425-9599-bbaf3fdf0374-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835922 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835939 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-default-certificate\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835955 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a78c59e1-105e-4581-a0bb-27c1d78dbdee-secret-volume\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835970 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-socket-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.835986 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-mountpoint-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836003 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/33884de2-1133-47f6-b1c3-e49151ad1b54-profile-collector-cert\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836018 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-proxy-tls\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836034 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pfwp\" (UniqueName: \"kubernetes.io/projected/f20bd200-d99d-423d-b650-9095869b9a8b-kube-api-access-9pfwp\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836053 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-service-ca\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836071 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9e228a32-b6c0-4796-9e16-ad8fce227175-signing-key\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836098 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836121 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/227649b8-fbe1-4426-a63b-1a4f13700bba-srv-cert\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836137 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmb8v\" (UniqueName: \"kubernetes.io/projected/227649b8-fbe1-4426-a63b-1a4f13700bba-kube-api-access-nmb8v\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836151 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/f0bcbb8b-0e11-45fc-a632-2d869e47c651-webhook-cert\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836167 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-csi-data-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836204 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8crr\" (UniqueName: \"kubernetes.io/projected/3daae941-7347-4673-8fef-20c2785a8cd6-kube-api-access-l8crr\") pod \"control-plane-machine-set-operator-78cbb6b69f-p4g9c\" (UID: \"3daae941-7347-4673-8fef-20c2785a8cd6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836222 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fcjr\" (UniqueName: \"kubernetes.io/projected/265f5d9c-2988-4a0b-9353-b426422d4c48-kube-api-access-4fcjr\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.836241 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-metrics-certs\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.839942 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3daae941-7347-4673-8fef-20c2785a8cd6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-p4g9c\" (UID: \"3daae941-7347-4673-8fef-20c2785a8cd6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.841127 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-socket-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.842126 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-config\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.844931 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.845475 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-mountpoint-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.845821 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c290ae4-345a-4877-b020-2d64197e1bf2-config-volume\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.845938 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acf04d74-cef0-4425-9599-bbaf3fdf0374-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.846432 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/265f5d9c-2988-4a0b-9353-b426422d4c48-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.846688 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-service-ca-bundle\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.847100 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9004f250-91b4-440a-b7a1-60b81c8070fa-serving-cert\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.847835 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9417b0a9-6016-419f-bf13-94812e88ca91-config\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.849746 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f0bcbb8b-0e11-45fc-a632-2d869e47c651-tmpfs\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.850492 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9e228a32-b6c0-4796-9e16-ad8fce227175-signing-cabundle\") pod \"service-ca-9c57cc56f-bhs7s\" 
(UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.852352 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a78c59e1-105e-4581-a0bb-27c1d78dbdee-config-volume\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.857358 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-service-ca\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.862774 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.864513 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-registration-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.865068 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f20bd200-d99d-423d-b650-9095869b9a8b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.865443 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c290ae4-345a-4877-b020-2d64197e1bf2-metrics-tls\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.865868 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-metrics-certs\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.866569 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a78c59e1-105e-4581-a0bb-27c1d78dbdee-secret-volume\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.866899 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f20bd200-d99d-423d-b650-9095869b9a8b-images\") pod 
\"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.867705 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/265f5d9c-2988-4a0b-9353-b426422d4c48-audit-dir\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.867745 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-plugins-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.868893 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/67e8427e-1a00-4ee0-a364-badb08b1cd8d-csi-data-dir\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:52 crc kubenswrapper[4848]: E0128 12:48:52.869957 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.369936682 +0000 UTC m=+160.282153710 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.871216 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9e228a32-b6c0-4796-9e16-ad8fce227175-signing-key\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.871769 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-ca\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.876223 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-proxy-tls\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.878386 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/f0bcbb8b-0e11-45fc-a632-2d869e47c651-apiservice-cert\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.885890 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/227649b8-fbe1-4426-a63b-1a4f13700bba-srv-cert\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.896309 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/2d8c0888-72ad-42f4-993a-47ffb613e406-node-bootstrap-token\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.897295 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f20bd200-d99d-423d-b650-9095869b9a8b-proxy-tls\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.897345 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2dff4bcb-d09c-4094-8cdf-35f0e7e51176-cert\") pod \"ingress-canary-9k5dc\" (UID: \"2dff4bcb-d09c-4094-8cdf-35f0e7e51176\") " pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.897413 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.897718 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/2d8c0888-72ad-42f4-993a-47ffb613e406-certs\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.898076 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/33884de2-1133-47f6-b1c3-e49151ad1b54-profile-collector-cert\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.898186 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/227649b8-fbe1-4426-a63b-1a4f13700bba-profile-collector-cert\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 
12:48:52.899312 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f0bcbb8b-0e11-45fc-a632-2d869e47c651-webhook-cert\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.899944 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-stats-auth\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.900297 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/33884de2-1133-47f6-b1c3-e49151ad1b54-srv-cert\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.901097 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jvwtg\" (UID: \"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.901402 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.902260 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9417b0a9-6016-419f-bf13-94812e88ca91-serving-cert\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.904458 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8e60e303-4483-44cb-b22f-f2cce7f9882d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-dx4hg\" (UID: \"8e60e303-4483-44cb-b22f-f2cce7f9882d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.906902 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz6c5\" (UniqueName: \"kubernetes.io/projected/9004f250-91b4-440a-b7a1-60b81c8070fa-kube-api-access-lz6c5\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.908649 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-default-certificate\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.918420 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zqsmz\" (UniqueName: \"kubernetes.io/projected/9e228a32-b6c0-4796-9e16-ad8fce227175-kube-api-access-zqsmz\") pod \"service-ca-9c57cc56f-bhs7s\" (UID: \"9e228a32-b6c0-4796-9e16-ad8fce227175\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.921841 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/265f5d9c-2988-4a0b-9353-b426422d4c48-encryption-config\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.922396 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9004f250-91b4-440a-b7a1-60b81c8070fa-etcd-client\") pod \"etcd-operator-b45778765-fw7xc\" (UID: \"9004f250-91b4-440a-b7a1-60b81c8070fa\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.933590 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acf04d74-cef0-4425-9599-bbaf3fdf0374-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.933588 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j8zg\" (UniqueName: \"kubernetes.io/projected/3c290ae4-345a-4877-b020-2d64197e1bf2-kube-api-access-5j8zg\") pod \"dns-default-wgv95\" (UID: \"3c290ae4-345a-4877-b020-2d64197e1bf2\") " pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.937154 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plnqm\" (UniqueName: \"kubernetes.io/projected/9417b0a9-6016-419f-bf13-94812e88ca91-kube-api-access-plnqm\") pod \"service-ca-operator-777779d784-xd2dm\" (UID: \"9417b0a9-6016-419f-bf13-94812e88ca91\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.938043 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:52 crc kubenswrapper[4848]: E0128 12:48:52.938652 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.438629309 +0000 UTC m=+160.350846347 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.959292 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdw7n\" (UniqueName: \"kubernetes.io/projected/f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4-kube-api-access-sdw7n\") pod \"package-server-manager-789f6589d5-jvwtg\" (UID: \"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.965222 4848 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-j4xw5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.965309 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.966355 4848 generic.go:334] "Generic (PLEG): container finished" podID="1afda1fa-7adc-4d82-bd34-5743e3b89b52" containerID="d9940b885f8a4084da5e70832f0b29006a74858e20a02dc38f7903474a98f733" exitCode=0 Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.985641 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stcmv\" (UniqueName: \"kubernetes.io/projected/8e60e303-4483-44cb-b22f-f2cce7f9882d-kube-api-access-stcmv\") pod \"multus-admission-controller-857f4d67dd-dx4hg\" (UID: \"8e60e303-4483-44cb-b22f-f2cce7f9882d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.994513 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:52 crc kubenswrapper[4848]: I0128 12:48:52.998180 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8xz4\" (UniqueName: \"kubernetes.io/projected/f0bcbb8b-0e11-45fc-a632-2d869e47c651-kube-api-access-m8xz4\") pod \"packageserver-d55dfcdfc-tmwll\" (UID: \"f0bcbb8b-0e11-45fc-a632-2d869e47c651\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.006050 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.021845 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022643 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" event={"ID":"6471a57b-f563-440e-9fa7-2c24af8039c9","Type":"ContainerStarted","Data":"2b7d9c8e4c967e146ab89a27478347f766ededcf1487f8d688ea98a2fce0fa28"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022695 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" event={"ID":"6471a57b-f563-440e-9fa7-2c24af8039c9","Type":"ContainerStarted","Data":"b7a7a3a17eae0608187a089c151b018150297b93b1a2ef1f85b7b4abb30617bd"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022712 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" event={"ID":"1a85f330-84c9-416c-9fcc-8e775a19599d","Type":"ContainerStarted","Data":"d98eae6bcd796219a76c0ae511789a7255fb367ce330c20ecf4b477fdc4b9aa4"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022728 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" event={"ID":"1a85f330-84c9-416c-9fcc-8e775a19599d","Type":"ContainerStarted","Data":"8602f1e327d26d379d346cc4af591817ab423c7fa94904be1663a278fd4149c1"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022740 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" event={"ID":"1afda1fa-7adc-4d82-bd34-5743e3b89b52","Type":"ContainerDied","Data":"d9940b885f8a4084da5e70832f0b29006a74858e20a02dc38f7903474a98f733"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022758 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" event={"ID":"244b24d8-4734-4c9f-8ba3-ef7616581a58","Type":"ContainerStarted","Data":"87c54340d473ce96d177662bb1f0f12f3552b7cf8194c5d40c308dd17cf4f362"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.022778 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.025497 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" event={"ID":"61c3ecdb-58a8-4558-a43c-81cd7e8b4132","Type":"ContainerStarted","Data":"7080c909ed55f88bc4fef0520527fc3c0909c6b1c5281ff5b0cff4557e2edb33"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.041353 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.041883 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86gr6\" (UniqueName: \"kubernetes.io/projected/4dd1be3c-1904-4ce3-9a1b-84a4f2315b96-kube-api-access-86gr6\") pod \"router-default-5444994796-djhmv\" (UID: \"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96\") " pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 
12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.043510 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.543493071 +0000 UTC m=+160.455710109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.055889 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" event={"ID":"2ace4935-baa0-4bdd-8ab6-f57415730959","Type":"ContainerStarted","Data":"acf712ea6bdf9508c425f81b3b2b9d7cdf4c12ac43e2b1a2966d19d1c857dd45"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.056562 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7dnl\" (UniqueName: \"kubernetes.io/projected/2dff4bcb-d09c-4094-8cdf-35f0e7e51176-kube-api-access-d7dnl\") pod \"ingress-canary-9k5dc\" (UID: \"2dff4bcb-d09c-4094-8cdf-35f0e7e51176\") " pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.059884 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbzfx\" (UniqueName: \"kubernetes.io/projected/970fc81c-bb86-43f0-a598-067a8d9febe9-kube-api-access-mbzfx\") pod \"migrator-59844c95c7-rtg68\" (UID: \"970fc81c-bb86-43f0-a598-067a8d9febe9\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.069113 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.070033 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" event={"ID":"2ca380f6-b02f-4128-86f6-ff19d22c532d","Type":"ContainerStarted","Data":"a4b7637e602e06d9589c1ce95fc583103c74718c8cbcfe3df5d9781c1ed095eb"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.073647 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" event={"ID":"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7","Type":"ContainerStarted","Data":"d6c31ae73c42031f1ed6f7afb981e903ecd74925bcfd37b1846bad6f17734f1a"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.073717 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" event={"ID":"16e0f86b-6544-44ba-b415-0cb5a6c5a0d7","Type":"ContainerStarted","Data":"59e6e335c06e7793854017a676f3e0b9f10a077ea3b5b37350d5110e984e971f"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.079103 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7tqv\" (UniqueName: \"kubernetes.io/projected/acf04d74-cef0-4425-9599-bbaf3fdf0374-kube-api-access-d7tqv\") pod \"kube-storage-version-migrator-operator-b67b599dd-bfrtx\" (UID: \"acf04d74-cef0-4425-9599-bbaf3fdf0374\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.090690 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" event={"ID":"4f3b423f-a356-4d55-bc15-c476dd03d771","Type":"ContainerStarted","Data":"7dfe5f33e59b2bb68242db9f4cdbff97561b1a6c1befdeb6fc0a504c6aa2a9c0"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.095620 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds"] Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.114330 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvrrx\" (UniqueName: \"kubernetes.io/projected/67e8427e-1a00-4ee0-a364-badb08b1cd8d-kube-api-access-mvrrx\") pod \"csi-hostpathplugin-dkwtc\" (UID: \"67e8427e-1a00-4ee0-a364-badb08b1cd8d\") " pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.123638 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" event={"ID":"be55bab9-c006-41aa-9347-817555da5dcf","Type":"ContainerStarted","Data":"291e2932587b0c9326655af6a27e7a8517967db621add61ff2fa333dfd1531b5"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.140573 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.142162 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.143681 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.643641373 +0000 UTC m=+160.555858411 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.148469 4848 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zwpd8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.148542 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.149060 4848 patch_prober.go:28] interesting pod/console-operator-58897d9998-fdf5k container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.149097 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" podUID="0fe6e645-a059-4073-ad43-ecd9a8bdac14" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.149773 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" event={"ID":"fca29e57-7eeb-48d7-a4d9-2125f65816f4","Type":"ContainerStarted","Data":"8d7f655ef5d5f5bb58cdcc72a200cc918df284f5be4f866e0c3dd017da67d1fd"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.149843 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.149860 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" event={"ID":"fca29e57-7eeb-48d7-a4d9-2125f65816f4","Type":"ContainerStarted","Data":"4419fdd1db18f47562c27d426f8129051b3f95abed376d1726e30a133fbd80fa"} Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.150901 4848 patch_prober.go:28] interesting pod/downloads-7954f5f757-jltf4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.150925 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jltf4" podUID="db02dfdf-9c10-4e70-80e6-29385127d7d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.152577 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pfwp\" (UniqueName: \"kubernetes.io/projected/f20bd200-d99d-423d-b650-9095869b9a8b-kube-api-access-9pfwp\") pod \"machine-config-operator-74547568cd-6p6sv\" (UID: \"f20bd200-d99d-423d-b650-9095869b9a8b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.158362 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.189260 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq6bm\" (UniqueName: \"kubernetes.io/projected/2d8c0888-72ad-42f4-993a-47ffb613e406-kube-api-access-pq6bm\") pod \"machine-config-server-9xs2f\" (UID: \"2d8c0888-72ad-42f4-993a-47ffb613e406\") " pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.190011 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4fvt\" (UniqueName: \"kubernetes.io/projected/33884de2-1133-47f6-b1c3-e49151ad1b54-kube-api-access-g4fvt\") pod \"olm-operator-6b444d44fb-hshc8\" (UID: \"33884de2-1133-47f6-b1c3-e49151ad1b54\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.190068 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.190643 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjj2r\" (UniqueName: \"kubernetes.io/projected/a78c59e1-105e-4581-a0bb-27c1d78dbdee-kube-api-access-wjj2r\") pod \"collect-profiles-29493405-thrcp\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.216578 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vk86\" (UniqueName: \"kubernetes.io/projected/f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a-kube-api-access-7vk86\") pod \"machine-config-controller-84d6567774-2qm9g\" (UID: \"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.218848 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.229417 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.229590 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmb8v\" (UniqueName: \"kubernetes.io/projected/227649b8-fbe1-4426-a63b-1a4f13700bba-kube-api-access-nmb8v\") pod \"catalog-operator-68c6474976-xqtsj\" (UID: \"227649b8-fbe1-4426-a63b-1a4f13700bba\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.247870 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.250575 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.256095 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.75605059 +0000 UTC m=+160.668267628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.267463 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.272332 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.288368 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fcjr\" (UniqueName: \"kubernetes.io/projected/265f5d9c-2988-4a0b-9353-b426422d4c48-kube-api-access-4fcjr\") pod \"apiserver-7bbb656c7d-pfwwr\" (UID: \"265f5d9c-2988-4a0b-9353-b426422d4c48\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.295982 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9k5dc" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.298654 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8crr\" (UniqueName: \"kubernetes.io/projected/3daae941-7347-4673-8fef-20c2785a8cd6-kube-api-access-l8crr\") pod \"control-plane-machine-set-operator-78cbb6b69f-p4g9c\" (UID: \"3daae941-7347-4673-8fef-20c2785a8cd6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.302606 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fxm2\" (UniqueName: \"kubernetes.io/projected/0d553491-aa2a-495d-b02c-73a52d29278b-kube-api-access-4fxm2\") pod \"marketplace-operator-79b997595-hlgtd\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.327686 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.353610 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.353965 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.354434 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.854412914 +0000 UTC m=+160.766629952 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.372799 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.391137 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.403070 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.433101 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.441272 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8tnwl" podStartSLOduration=133.441252035 podStartE2EDuration="2m13.441252035s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:53.432805435 +0000 UTC m=+160.345022473" watchObservedRunningTime="2026-01-28 12:48:53.441252035 +0000 UTC m=+160.353469073" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.442841 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v"] Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.455171 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.455947 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:53.955918033 +0000 UTC m=+160.868135071 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.477735 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-9xs2f" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.519784 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.556716 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.558087 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.05806447 +0000 UTC m=+160.970281518 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.659387 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.659877 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.159853838 +0000 UTC m=+161.072070876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.761135 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.761937 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 12:48:54.261905312 +0000 UTC m=+161.174122350 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.798005 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" podStartSLOduration=132.797984693 podStartE2EDuration="2m12.797984693s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:53.797835519 +0000 UTC m=+160.710052557" watchObservedRunningTime="2026-01-28 12:48:53.797984693 +0000 UTC m=+160.710201731" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.798436 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vfjn4" podStartSLOduration=133.798428955 podStartE2EDuration="2m13.798428955s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:53.767623827 +0000 UTC m=+160.679840875" watchObservedRunningTime="2026-01-28 12:48:53.798428955 +0000 UTC m=+160.710645993" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.824212 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-q4jxp" podStartSLOduration=133.824191685 podStartE2EDuration="2m13.824191685s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:53.819819897 +0000 UTC m=+160.732036925" watchObservedRunningTime="2026-01-28 12:48:53.824191685 +0000 UTC m=+160.736408723" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.833617 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bhs7s"] Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.863589 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.864323 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.364307277 +0000 UTC m=+161.276524315 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.926640 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg"] Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.956952 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pslnn" podStartSLOduration=133.956904214 podStartE2EDuration="2m13.956904214s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:53.949310237 +0000 UTC m=+160.861527275" watchObservedRunningTime="2026-01-28 12:48:53.956904214 +0000 UTC m=+160.869121252" Jan 28 12:48:53 crc kubenswrapper[4848]: I0128 12:48:53.967539 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:53 crc kubenswrapper[4848]: E0128 12:48:53.967944 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.467923254 +0000 UTC m=+161.380140292 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.018053 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4jt4" podStartSLOduration=134.018028316 podStartE2EDuration="2m14.018028316s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:53.981267066 +0000 UTC m=+160.893484104" watchObservedRunningTime="2026-01-28 12:48:54.018028316 +0000 UTC m=+160.930245354" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.069452 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.070044 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.570027779 +0000 UTC m=+161.482244817 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.076787 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-wgv95"] Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.130390 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-zdq5h" podStartSLOduration=133.13037069 podStartE2EDuration="2m13.13037069s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:54.129405933 +0000 UTC m=+161.041622971" watchObservedRunningTime="2026-01-28 12:48:54.13037069 +0000 UTC m=+161.042587728" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.171627 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.172712 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.67266741 +0000 UTC m=+161.584884458 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.232512 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vvb9q" podStartSLOduration=134.232475406 podStartE2EDuration="2m14.232475406s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:54.20908084 +0000 UTC m=+161.121297898" watchObservedRunningTime="2026-01-28 12:48:54.232475406 +0000 UTC m=+161.144692444" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.280897 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.281521 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.781495319 +0000 UTC m=+161.693712357 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.302419 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" event={"ID":"61c3ecdb-58a8-4558-a43c-81cd7e8b4132","Type":"ContainerStarted","Data":"3c28d6357f5c2b631cfb923e595df612a6eeb79f25cbc7ee163cdbbf4943ac2b"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.303735 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.317569 4848 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qrnzf container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.28:6443/healthz\": dial tcp 10.217.0.28:6443: connect: connection refused" start-of-body= Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.317642 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" podUID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.28:6443/healthz\": dial tcp 10.217.0.28:6443: connect: connection refused" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.336071 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" event={"ID":"4f3b423f-a356-4d55-bc15-c476dd03d771","Type":"ContainerStarted","Data":"aae9c2b7f7e995979fcd9efcefe41301ff352f6234c1b978fbdba08742054d19"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.336118 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" event={"ID":"4f3b423f-a356-4d55-bc15-c476dd03d771","Type":"ContainerStarted","Data":"3d71bbe3d2b1beae3317e5737042159497123aa02095de012164e02d3f040649"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.343874 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-9xs2f" event={"ID":"2d8c0888-72ad-42f4-993a-47ffb613e406","Type":"ContainerStarted","Data":"8dab8efb17ed8362443d0e02ef45c10c5708ee2f77756a8b439d8ac7c350b8ff"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.371340 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" event={"ID":"244b24d8-4734-4c9f-8ba3-ef7616581a58","Type":"ContainerStarted","Data":"9512dc2236dfb62b2d696bb5bf755cf870c8bcb90ce735e2f8524d31687f3a01"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.386813 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.387374 4848 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.887348656 +0000 UTC m=+161.799565694 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.408410 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" event={"ID":"e9c4f39e-68b1-4abb-9e40-553abbce5d0f","Type":"ContainerStarted","Data":"5444958a4ecf0519cddf714fb5253833c4cac8a5b44bfc00020ca83f4e3b3043"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.419090 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" event={"ID":"9e228a32-b6c0-4796-9e16-ad8fce227175","Type":"ContainerStarted","Data":"2f15b11711434847493c3d4de6487775ed2606fdda6c285a2bc798213ea35980"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.437099 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wj7x7" podStartSLOduration=134.437083079 podStartE2EDuration="2m14.437083079s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:54.435834675 +0000 UTC m=+161.348051713" watchObservedRunningTime="2026-01-28 12:48:54.437083079 +0000 UTC m=+161.349300107" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.437549 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" podStartSLOduration=134.437543451 podStartE2EDuration="2m14.437543451s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:54.386257627 +0000 UTC m=+161.298474665" watchObservedRunningTime="2026-01-28 12:48:54.437543451 +0000 UTC m=+161.349760489" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.437609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" event={"ID":"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4","Type":"ContainerStarted","Data":"50cccb626a4b43c96c905bcd297c5eadea4acce8237179cb8522ed227ede7998"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.458868 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" event={"ID":"1afda1fa-7adc-4d82-bd34-5743e3b89b52","Type":"ContainerStarted","Data":"aa43f66fc14f6ab733261033941871e8142a85e33fb0f100bb6ee700aa5b6a14"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.459720 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.471583 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-djhmv" event={"ID":"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96","Type":"ContainerStarted","Data":"0c656b89eb48c86ad22ee2812d2ff1b0647cf1e7049c18ade1cfabee773fa99a"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.476597 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" event={"ID":"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390","Type":"ContainerStarted","Data":"7ef0f780f694824f6b5c14055ab087680199b98929783ad61ae2366348a41f79"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.491080 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.493514 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:54.993496462 +0000 UTC m=+161.905713510 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.509843 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" event={"ID":"be55bab9-c006-41aa-9347-817555da5dcf","Type":"ContainerStarted","Data":"1f8d23391170be0ecaa302bc9f0b6de848452de26b5acd30c6822dbd1cfcee02"} Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.510723 4848 patch_prober.go:28] interesting pod/downloads-7954f5f757-jltf4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.510773 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jltf4" podUID="db02dfdf-9c10-4e70-80e6-29385127d7d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.556966 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.593916 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") 
pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.594481 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.094445097 +0000 UTC m=+162.006662205 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.594870 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.600665 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.100650426 +0000 UTC m=+162.012867464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.696167 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.698448 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.198416873 +0000 UTC m=+162.110633911 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.800918 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.801401 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.301384183 +0000 UTC m=+162.213601221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.833095 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" podStartSLOduration=134.833060675 podStartE2EDuration="2m14.833060675s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:54.801889116 +0000 UTC m=+161.714106164" watchObservedRunningTime="2026-01-28 12:48:54.833060675 +0000 UTC m=+161.745277723" Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.902320 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.902496 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.402470491 +0000 UTC m=+162.314687529 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:54 crc kubenswrapper[4848]: I0128 12:48:54.902619 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:54 crc kubenswrapper[4848]: E0128 12:48:54.902968 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.402959965 +0000 UTC m=+162.315177003 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.003709 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.003907 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.503884709 +0000 UTC m=+162.416101757 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.005636 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.007186 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.507171678 +0000 UTC m=+162.419388716 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.023447 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5dbnv" podStartSLOduration=135.02342681 podStartE2EDuration="2m15.02342681s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.018544767 +0000 UTC m=+161.930761805" watchObservedRunningTime="2026-01-28 12:48:55.02342681 +0000 UTC m=+161.935643848" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.058595 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-jltf4" podStartSLOduration=135.058565815 podStartE2EDuration="2m15.058565815s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.051847743 +0000 UTC m=+161.964064791" watchObservedRunningTime="2026-01-28 12:48:55.058565815 +0000 UTC m=+161.970782853" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.109338 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.109564 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 12:48:55.6095258 +0000 UTC m=+162.521742838 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.109952 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.110515 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.610493617 +0000 UTC m=+162.522710655 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.218882 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.219095 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.719053578 +0000 UTC m=+162.631270616 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.219287 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.219689 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.719671205 +0000 UTC m=+162.631888243 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.244432 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" podStartSLOduration=135.244398147 podStartE2EDuration="2m15.244398147s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.241682803 +0000 UTC m=+162.153899851" watchObservedRunningTime="2026-01-28 12:48:55.244398147 +0000 UTC m=+162.156615185" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.316993 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" podStartSLOduration=135.31697523 podStartE2EDuration="2m15.31697523s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.315972333 +0000 UTC m=+162.228189371" watchObservedRunningTime="2026-01-28 12:48:55.31697523 +0000 UTC m=+162.229192258" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.320939 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.321535 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.821517414 +0000 UTC m=+162.733734452 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.364753 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-rg97d" podStartSLOduration=135.364731799 podStartE2EDuration="2m15.364731799s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.359906018 +0000 UTC m=+162.272123066" watchObservedRunningTime="2026-01-28 12:48:55.364731799 +0000 UTC m=+162.276948847" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.410401 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.410485 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.417986 4848 patch_prober.go:28] interesting pod/apiserver-76f77b778f-84b8w container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.418085 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" podUID="244b24d8-4734-4c9f-8ba3-ef7616581a58" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.424887 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.425282 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:55.925254904 +0000 UTC m=+162.837471942 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.463633 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-fdf5k" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.485697 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" podStartSLOduration=135.485675697 podStartE2EDuration="2m15.485675697s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.483223011 +0000 UTC m=+162.395440049" watchObservedRunningTime="2026-01-28 12:48:55.485675697 +0000 UTC m=+162.397892755" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.529895 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.530896 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.030866816 +0000 UTC m=+162.943083864 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.631421 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.631816 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.13180015 +0000 UTC m=+163.044017178 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.659259 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wgv95" event={"ID":"3c290ae4-345a-4877-b020-2d64197e1bf2","Type":"ContainerStarted","Data":"9264252d0fafe7aa3c7a6fe690536cedf84880f1718efb5fdd2bbcf72f5a91bb"} Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.677671 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" event={"ID":"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390","Type":"ContainerStarted","Data":"be49693ac0e45b4d5d09b76b09c1f61f5943ea5b207866a2b7f843853f9b0dd0"} Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.722959 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" event={"ID":"9e228a32-b6c0-4796-9e16-ad8fce227175","Type":"ContainerStarted","Data":"7c64ec4bb303a4aed87159ee9ead84434b014f336584535c5b6cebc6d2dd8e6b"} Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.733158 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.733349 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.23332617 +0000 UTC m=+163.145543208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.733620 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.735219 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.235202991 +0000 UTC m=+163.147420029 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.784779 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" event={"ID":"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4","Type":"ContainerStarted","Data":"5b447a7644ea10d962fd6a0cceaf3f078e337e370484af39d958f0017617eb1f"} Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.785844 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-bhs7s" podStartSLOduration=134.785816617 podStartE2EDuration="2m14.785816617s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:55.783420852 +0000 UTC m=+162.695637900" watchObservedRunningTime="2026-01-28 12:48:55.785816617 +0000 UTC m=+162.698033655" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.822247 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm"] Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.846798 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.847404 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.347376701 +0000 UTC m=+163.259593739 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:55 crc kubenswrapper[4848]: W0128 12:48:55.884778 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9417b0a9_6016_419f_bf13_94812e88ca91.slice/crio-e3448a532f2cd49c57aa852518d2271a35b4c4d302fe3bd9dd90abbe8a4ddc7f WatchSource:0}: Error finding container e3448a532f2cd49c57aa852518d2271a35b4c4d302fe3bd9dd90abbe8a4ddc7f: Status 404 returned error can't find the container with id e3448a532f2cd49c57aa852518d2271a35b4c4d302fe3bd9dd90abbe8a4ddc7f Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.920214 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:48:55 crc kubenswrapper[4848]: I0128 12:48:55.948485 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:55 crc kubenswrapper[4848]: E0128 12:48:55.950875 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.450854174 +0000 UTC m=+163.363071422 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.049735 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.087031 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.586951804 +0000 UTC m=+163.499168842 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.187118 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.195570 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.695542686 +0000 UTC m=+163.607759724 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: W0128 12:48:56.204633 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podacf04d74_cef0_4425_9599_bbaf3fdf0374.slice/crio-a3e90a8708b38297c353972f2418b6b5288d2a4a2e40e7aaddacfd6dfd754d9b WatchSource:0}: Error finding container a3e90a8708b38297c353972f2418b6b5288d2a4a2e40e7aaddacfd6dfd754d9b: Status 404 returned error can't find the container with id a3e90a8708b38297c353972f2418b6b5288d2a4a2e40e7aaddacfd6dfd754d9b Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.209143 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.227860 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dkwtc"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.247324 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlgtd"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.283583 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fw7xc"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.297496 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.298087 4848 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.798056084 +0000 UTC m=+163.710273122 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.300116 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll"] Jan 28 12:48:56 crc kubenswrapper[4848]: W0128 12:48:56.373860 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d553491_aa2a_495d_b02c_73a52d29278b.slice/crio-196ce14f09e1d427f4980821118e59920e1fb4f94595405d10a7c246fbb72d8e WatchSource:0}: Error finding container 196ce14f09e1d427f4980821118e59920e1fb4f94595405d10a7c246fbb72d8e: Status 404 returned error can't find the container with id 196ce14f09e1d427f4980821118e59920e1fb4f94595405d10a7c246fbb72d8e Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.399799 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.400847 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:56.900822477 +0000 UTC m=+163.813039525 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: W0128 12:48:56.436674 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9004f250_91b4_440a_b7a1_60b81c8070fa.slice/crio-17b12f004b374b4f6423d1ba929ef4615254e3ff72912db3abe97c57a42e46b5 WatchSource:0}: Error finding container 17b12f004b374b4f6423d1ba929ef4615254e3ff72912db3abe97c57a42e46b5: Status 404 returned error can't find the container with id 17b12f004b374b4f6423d1ba929ef4615254e3ff72912db3abe97c57a42e46b5 Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.502563 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.503505 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.003476138 +0000 UTC m=+163.915693186 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.563498 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.609476 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.609828 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.109811039 +0000 UTC m=+164.022028077 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.706591 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9k5dc"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.708611 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.712174 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.712795 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.212764749 +0000 UTC m=+164.124981787 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.727833 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.744343 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-dx4hg"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.755365 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.760250 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.815846 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.816237 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 12:48:57.316222971 +0000 UTC m=+164.228440009 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.835736 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" event={"ID":"f7665ed2-3bf7-4684-ab55-d7df2a0ce4b4","Type":"ContainerStarted","Data":"3e84fe4ecae54db5aedba41651cbcdfd65e0015398b02b2152f3290eb7a8216e"} Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.837734 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.881890 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" podStartSLOduration=135.881858396 podStartE2EDuration="2m15.881858396s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:56.881673431 +0000 UTC m=+163.793890469" watchObservedRunningTime="2026-01-28 12:48:56.881858396 +0000 UTC m=+163.794075434" Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.917323 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:56 crc kubenswrapper[4848]: E0128 12:48:56.919063 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.419025956 +0000 UTC m=+164.331242994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.941781 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.961358 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" event={"ID":"33884de2-1133-47f6-b1c3-e49151ad1b54","Type":"ContainerStarted","Data":"a43f51f95d179c83e63524085520bdf60300cadf76f0fa6146ca4ef3bd415c6d"} Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.961437 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" event={"ID":"9004f250-91b4-440a-b7a1-60b81c8070fa","Type":"ContainerStarted","Data":"17b12f004b374b4f6423d1ba929ef4615254e3ff72912db3abe97c57a42e46b5"} Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.961467 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" event={"ID":"9417b0a9-6016-419f-bf13-94812e88ca91","Type":"ContainerStarted","Data":"d20cc89f533dddc81313772faadbdebd043901ab1d6751871a6e1e5b6506fa4d"} Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.961498 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" event={"ID":"9417b0a9-6016-419f-bf13-94812e88ca91","Type":"ContainerStarted","Data":"e3448a532f2cd49c57aa852518d2271a35b4c4d302fe3bd9dd90abbe8a4ddc7f"} Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.961513 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp"] Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.976484 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-xd2dm" podStartSLOduration=135.976462158 podStartE2EDuration="2m15.976462158s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:56.974253168 +0000 UTC m=+163.886470206" watchObservedRunningTime="2026-01-28 12:48:56.976462158 +0000 UTC m=+163.888679196" Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.977047 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" event={"ID":"e9c4f39e-68b1-4abb-9e40-553abbce5d0f","Type":"ContainerStarted","Data":"ec8bde93dee7f9fc808ce8d1a1833a0ae45154a663d4d640fe16e9e4dc376a12"} Jan 28 12:48:56 crc kubenswrapper[4848]: I0128 12:48:56.977997 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fh6qn" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.000237 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" event={"ID":"acf04d74-cef0-4425-9599-bbaf3fdf0374","Type":"ContainerStarted","Data":"a3e90a8708b38297c353972f2418b6b5288d2a4a2e40e7aaddacfd6dfd754d9b"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.019219 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.020877 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.520848375 +0000 UTC m=+164.433065413 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.039723 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wgv95" event={"ID":"3c290ae4-345a-4877-b020-2d64197e1bf2","Type":"ContainerStarted","Data":"98256f591b13130a95c084f9039a2d4c0916bc246dac80b5735314157a94a57b"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.043693 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g"] Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.110906 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" event={"ID":"67e8427e-1a00-4ee0-a364-badb08b1cd8d","Type":"ContainerStarted","Data":"42e2249c56e2f0fe8880107f240b7675b3ffc93bc258912db82370bbc06dcf0b"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.132008 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-22cds" podStartSLOduration=136.131977285 podStartE2EDuration="2m16.131977285s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:57.129794576 +0000 UTC m=+164.042011644" watchObservedRunningTime="2026-01-28 12:48:57.131977285 +0000 UTC m=+164.044194323" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.144112 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.184723 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" event={"ID":"dfbdc2ca-b6b3-48f7-b469-67b6ab2b5390","Type":"ContainerStarted","Data":"01859371e533f6a53598cb2b51fbfb9543251d05d6fb839bce560857a0ff584d"} Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.185867 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.685828319 +0000 UTC m=+164.598045367 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.189973 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" event={"ID":"be55bab9-c006-41aa-9347-817555da5dcf","Type":"ContainerStarted","Data":"ec3d6d7d7c7a69a409d1f69f22d8384cabd9bd825db2d75b467b13dae4a6d9c3"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.196586 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" event={"ID":"0d553491-aa2a-495d-b02c-73a52d29278b","Type":"ContainerStarted","Data":"196ce14f09e1d427f4980821118e59920e1fb4f94595405d10a7c246fbb72d8e"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.256267 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" event={"ID":"f0bcbb8b-0e11-45fc-a632-2d869e47c651","Type":"ContainerStarted","Data":"6bf614268f68be6325d107dfa30deaa36b681204623108d523867414f4565126"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.280995 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.281500 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.781484781 +0000 UTC m=+164.693701819 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.288277 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.289380 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.348023 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-djhmv" event={"ID":"4dd1be3c-1904-4ce3-9a1b-84a4f2315b96","Type":"ContainerStarted","Data":"c238121229722b60189e31e4bda8d0c5721118a05597ef1f6e510e7354b2fd80"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.349546 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.369843 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.385092 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.385499 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.885453817 +0000 UTC m=+164.797670855 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.385550 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.385604 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.385670 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.386057 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.886033163 +0000 UTC m=+164.798250201 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.428079 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.444319 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-9xs2f" event={"ID":"2d8c0888-72ad-42f4-993a-47ffb613e406","Type":"ContainerStarted","Data":"2be03dcd60c58dde3ba079dd2c36613cd72307b49752f6307cf12b5c71e6f4b6"} Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.480961 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xh78v" podStartSLOduration=137.480938693 podStartE2EDuration="2m17.480938693s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:57.44220335 +0000 UTC m=+164.354420388" watchObservedRunningTime="2026-01-28 12:48:57.480938693 +0000 UTC m=+164.393155731" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.489531 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.489697 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.489767 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.491100 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:57.991079628 +0000 UTC m=+164.903296666 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.491143 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.592453 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.595886 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.095856497 +0000 UTC m=+165.008073705 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.703743 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.704338 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.204314625 +0000 UTC m=+165.116531663 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.743108 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.771100 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-bcrg5" podStartSLOduration=137.77106808 podStartE2EDuration="2m17.77106808s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:57.765512029 +0000 UTC m=+164.677729067" watchObservedRunningTime="2026-01-28 12:48:57.77106808 +0000 UTC m=+164.683285128" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.812479 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.813097 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.313075502 +0000 UTC m=+165.225292540 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.851712 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:48:57 crc kubenswrapper[4848]: I0128 12:48:57.914846 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:57 crc kubenswrapper[4848]: E0128 12:48:57.915572 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.415541329 +0000 UTC m=+165.327758367 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.021538 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.022510 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.522480776 +0000 UTC m=+165.434697814 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.028832 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-djhmv" podStartSLOduration=138.028804717 podStartE2EDuration="2m18.028804717s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.026889276 +0000 UTC m=+164.939106314" watchObservedRunningTime="2026-01-28 12:48:58.028804717 +0000 UTC m=+164.941021755" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.084328 4848 csr.go:261] certificate signing request csr-8qvv9 is approved, waiting to be issued Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.117688 4848 csr.go:257] certificate signing request csr-8qvv9 is issued Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.122789 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.122918 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.622888356 +0000 UTC m=+165.535105394 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.123024 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.123412 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.623397579 +0000 UTC m=+165.535614617 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.225345 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.725221667 +0000 UTC m=+165.637438705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.225401 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.225730 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.226203 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.726191434 +0000 UTC m=+165.638408472 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.327937 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.328040 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.828023623 +0000 UTC m=+165.740240661 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.328146 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.328459 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.328974 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.828965198 +0000 UTC m=+165.741182236 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.336110 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:48:58 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld Jan 28 12:48:58 crc kubenswrapper[4848]: [+]process-running ok Jan 28 12:48:58 crc kubenswrapper[4848]: healthz check failed Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.336220 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.429596 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.429935 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:58.929884272 +0000 UTC m=+165.842101310 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.449586 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9k5dc" event={"ID":"2dff4bcb-d09c-4094-8cdf-35f0e7e51176","Type":"ContainerStarted","Data":"ba30b863748fb339bcb11107a60ec98e75abb429643c2509c0987ab9ab62400a"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.449658 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9k5dc" event={"ID":"2dff4bcb-d09c-4094-8cdf-35f0e7e51176","Type":"ContainerStarted","Data":"a66cae27c0102fb27eba8dc6bb48619c64223a8ed94f3ee5e9fdb585c20ecacc"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.453024 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" event={"ID":"265f5d9c-2988-4a0b-9353-b426422d4c48","Type":"ContainerStarted","Data":"814cf637a5c44bc49caf7e9cc53503ab8b756f25993df8cb628f713431c3f593"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.457304 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" event={"ID":"a78c59e1-105e-4581-a0bb-27c1d78dbdee","Type":"ContainerStarted","Data":"054562811f8e2b39573262f9893cce064e89de0b153147ea7ba0a362026b02be"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.457353 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" event={"ID":"a78c59e1-105e-4581-a0bb-27c1d78dbdee","Type":"ContainerStarted","Data":"cc8cfa51a67c43940d62787c4a47311afc1ee59b10654f52abca1ce8134ca3f2"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.463526 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" event={"ID":"3daae941-7347-4673-8fef-20c2785a8cd6","Type":"ContainerStarted","Data":"f953cdea1595989d588b19ed22f9dbf8502a181d94879ff1922fa3adb0fc88e0"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.472136 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" event={"ID":"8e60e303-4483-44cb-b22f-f2cce7f9882d","Type":"ContainerStarted","Data":"9c15a5e86c12b22b88c7ba06130987f79f017b7c71de598c52bbe142ffdbb542"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.478678 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wgv95" event={"ID":"3c290ae4-345a-4877-b020-2d64197e1bf2","Type":"ContainerStarted","Data":"f6a6290e7577244969633d476070eb9849af9e299234fd8f7d905d0c83eb7307"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.479603 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-wgv95" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.485208 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" 
event={"ID":"0d553491-aa2a-495d-b02c-73a52d29278b","Type":"ContainerStarted","Data":"11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.485517 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.487969 4848 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-hlgtd container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.488251 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.490213 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" event={"ID":"970fc81c-bb86-43f0-a598-067a8d9febe9","Type":"ContainerStarted","Data":"38062a408265d5fcc795b596a85b8aab573b318330c38a7914837fdf36c8eb6a"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.490319 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" event={"ID":"970fc81c-bb86-43f0-a598-067a8d9febe9","Type":"ContainerStarted","Data":"09dc504d8fdcc149b77c10f7f18731386352ded2945290bc9096351bd8bf2379"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.492871 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" event={"ID":"acf04d74-cef0-4425-9599-bbaf3fdf0374","Type":"ContainerStarted","Data":"18b4a5a739774def5e6239fac392e5433b03462bd97ee93a3be6e898807e0ea3"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.495763 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" event={"ID":"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a","Type":"ContainerStarted","Data":"adbe7787e0a70f2f027a4280ad7bbce048305c6505fd354aee9de742dedd23df"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.495833 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" event={"ID":"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a","Type":"ContainerStarted","Data":"7c049257e3427596c8bb6213ac9066847e056e9b9d4d968ca4ee9c0eccac97dd"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.497774 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-9xs2f" podStartSLOduration=8.497746577000001 podStartE2EDuration="8.497746577s" podCreationTimestamp="2026-01-28 12:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.084617425 +0000 UTC m=+164.996834463" watchObservedRunningTime="2026-01-28 12:48:58.497746577 +0000 UTC m=+165.409963615" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.500127 4848 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" event={"ID":"9004f250-91b4-440a-b7a1-60b81c8070fa","Type":"ContainerStarted","Data":"3a637b6983a747711d3460261afd5d7da9d39437615960cfb66d75156156031b"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.502019 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" event={"ID":"f20bd200-d99d-423d-b650-9095869b9a8b","Type":"ContainerStarted","Data":"e9c008d162b52df9a937a40e1a9c6c29efc5faa33f6f6f71b08206678b7e8b86"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.502064 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" event={"ID":"f20bd200-d99d-423d-b650-9095869b9a8b","Type":"ContainerStarted","Data":"487874efdc4d63e226605085df8544cecec5e0842bfcd6f3ae5c32f0257e3bb7"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.504030 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" event={"ID":"227649b8-fbe1-4426-a63b-1a4f13700bba","Type":"ContainerStarted","Data":"a84c812ed27707e5ca26b665de027382c090e98317aa17efa42cc97040424a05"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.504101 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" event={"ID":"227649b8-fbe1-4426-a63b-1a4f13700bba","Type":"ContainerStarted","Data":"9236f13a595b15eec310b6dfea741920ab4ff98deb835f5a928ebc5a9c5f130e"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.504235 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.506536 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" event={"ID":"f0bcbb8b-0e11-45fc-a632-2d869e47c651","Type":"ContainerStarted","Data":"f105e166f686e538bafb3db92c0e92d32748f4338486bbc5623e91f274db345c"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.507317 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.510010 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" event={"ID":"33884de2-1133-47f6-b1c3-e49151ad1b54","Type":"ContainerStarted","Data":"cd3deb70dd8a7eb8ea7c717e20f7b75bcd7cf0b1ec56c341b93e7932e5f3a01b"} Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.510199 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.511350 4848 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xqtsj container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.511407 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" podUID="227649b8-fbe1-4426-a63b-1a4f13700bba" 
containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.512938 4848 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-tmwll container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" start-of-body= Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.513016 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" podUID="f0bcbb8b-0e11-45fc-a632-2d869e47c651" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.513157 4848 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-hshc8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.513274 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" podUID="33884de2-1133-47f6-b1c3-e49151ad1b54" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.532181 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.532680 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.032652226 +0000 UTC m=+165.944869274 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.558953 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" podStartSLOduration=138.558930191 podStartE2EDuration="2m18.558930191s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.501301324 +0000 UTC m=+165.413518372" watchObservedRunningTime="2026-01-28 12:48:58.558930191 +0000 UTC m=+165.471147229" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.637553 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.646556 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.146529702 +0000 UTC m=+166.058746740 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.657403 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.658315 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-wgv95" podStartSLOduration=8.658276641 podStartE2EDuration="8.658276641s" podCreationTimestamp="2026-01-28 12:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.644688792 +0000 UTC m=+165.556905830" watchObservedRunningTime="2026-01-28 12:48:58.658276641 +0000 UTC m=+165.570493679" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.660055 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-fw7xc" podStartSLOduration=138.66004377 podStartE2EDuration="2m18.66004377s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.561379487 +0000 UTC m=+165.473596545" watchObservedRunningTime="2026-01-28 12:48:58.66004377 +0000 UTC m=+165.572260808" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.670093 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.170072222 +0000 UTC m=+166.082289270 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.737682 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bfrtx" podStartSLOduration=137.737649149 podStartE2EDuration="2m17.737649149s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.729678543 +0000 UTC m=+165.641895601" watchObservedRunningTime="2026-01-28 12:48:58.737649149 +0000 UTC m=+165.649866187" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.759870 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.760229 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.260212402 +0000 UTC m=+166.172429440 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.846720 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" podStartSLOduration=137.846703014 podStartE2EDuration="2m17.846703014s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.790625649 +0000 UTC m=+165.702842697" watchObservedRunningTime="2026-01-28 12:48:58.846703014 +0000 UTC m=+165.758920072" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.863165 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.863556 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.363540032 +0000 UTC m=+166.275757080 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.898131 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" podStartSLOduration=137.898109452 podStartE2EDuration="2m17.898109452s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.854533797 +0000 UTC m=+165.766750865" watchObservedRunningTime="2026-01-28 12:48:58.898109452 +0000 UTC m=+165.810326490" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.950392 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" podStartSLOduration=137.950361282 podStartE2EDuration="2m17.950361282s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.899870099 +0000 UTC m=+165.812087137" watchObservedRunningTime="2026-01-28 12:48:58.950361282 +0000 UTC m=+165.862578320" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.952516 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" podStartSLOduration=137.952508171 podStartE2EDuration="2m17.952508171s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:58.950507026 +0000 UTC m=+165.862724064" watchObservedRunningTime="2026-01-28 12:48:58.952508171 +0000 UTC m=+165.864725209" Jan 28 12:48:58 crc kubenswrapper[4848]: I0128 12:48:58.979147 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:58 crc kubenswrapper[4848]: E0128 12:48:58.979693 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.479665769 +0000 UTC m=+166.391882807 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.024683 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.084551 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.085119 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.585096485 +0000 UTC m=+166.497313523 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.119622 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-28 12:43:58 +0000 UTC, rotation deadline is 2026-10-16 13:04:09.870635309 +0000 UTC Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.120292 4848 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6264h15m10.750351097s for next certificate rotation Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.185787 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.186075 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.68606061 +0000 UTC m=+166.598277648 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.287758 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.288302 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.788272639 +0000 UTC m=+166.700489667 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.337178 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:48:59 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld Jan 28 12:48:59 crc kubenswrapper[4848]: [+]process-running ok Jan 28 12:48:59 crc kubenswrapper[4848]: healthz check failed Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.337268 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.396283 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.396552 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.896502382 +0000 UTC m=+166.808719420 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.396674 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.397201 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:48:59.897189951 +0000 UTC m=+166.809407159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.500533 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.500758 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.000714175 +0000 UTC m=+166.912931213 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.501015 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.501606 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.001592439 +0000 UTC m=+166.913809667 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.547411 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" event={"ID":"3daae941-7347-4673-8fef-20c2785a8cd6","Type":"ContainerStarted","Data":"a829a63c1bf13bf3bfd496ac312242b5ed60323b5e4475b3a6e4d78465f053fc"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.574379 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" event={"ID":"8e60e303-4483-44cb-b22f-f2cce7f9882d","Type":"ContainerStarted","Data":"e124901e9cd6bbaf0671c33f9204cfa873968e1b7f49823b9e8550e3340bcaa1"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.574469 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" event={"ID":"8e60e303-4483-44cb-b22f-f2cce7f9882d","Type":"ContainerStarted","Data":"61fde9739efbf8f753f3e99af816156b9049a91f5c879d1b16ea00888584c173"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.596002 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-p4g9c" podStartSLOduration=138.595970325 podStartE2EDuration="2m18.595970325s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:59.59213145 +0000 UTC m=+166.504348488" watchObservedRunningTime="2026-01-28 12:48:59.595970325 +0000 UTC m=+166.508187363" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.601759 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.601968 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.101925627 +0000 UTC m=+167.014142665 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.602086 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.602520 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.102502682 +0000 UTC m=+167.014719720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.602859 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" event={"ID":"970fc81c-bb86-43f0-a598-067a8d9febe9","Type":"ContainerStarted","Data":"5e7ba0800310d965d383ed013151354daae682da536032ea69d3d702955e2d3b"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.619298 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c024f265-eb17-4ea2-82bf-6b5de7fd7b15","Type":"ContainerStarted","Data":"238ae41652a41bd5544557c2e8ef257c2b590eda7ff09f0cede23d214bdf9d6a"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.664569 4848 generic.go:334] "Generic (PLEG): container finished" podID="265f5d9c-2988-4a0b-9353-b426422d4c48" containerID="5bc33e0fe1a2d42c1edc72c9feb2ca4b7c96dbae9de16bfb0202307bc9e034c2" exitCode=0 Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.664750 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" event={"ID":"265f5d9c-2988-4a0b-9353-b426422d4c48","Type":"ContainerDied","Data":"5bc33e0fe1a2d42c1edc72c9feb2ca4b7c96dbae9de16bfb0202307bc9e034c2"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.688709 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" event={"ID":"f308d19a-e3c7-4623-b8bc-aea8d8d9cd4a","Type":"ContainerStarted","Data":"ed00a185c2093a27bce7ce4325bff73fdf4c7d64f02f6b26fdbd9acdc8255117"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.709406 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.710989 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.210964361 +0000 UTC m=+167.123181399 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.711760 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" event={"ID":"f20bd200-d99d-423d-b650-9095869b9a8b","Type":"ContainerStarted","Data":"68443c2268d49163d33dd9f436f64743e5fdde1dfb5a00fc86b86d6d5d8157cb"} Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.712620 4848 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-hshc8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.712682 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8" podUID="33884de2-1133-47f6-b1c3-e49151ad1b54" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.712788 4848 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xqtsj container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.712842 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" podUID="227649b8-fbe1-4426-a63b-1a4f13700bba" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.713133 4848 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-hlgtd container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.713152 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.742974 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-dx4hg" podStartSLOduration=138.7429501 podStartE2EDuration="2m18.7429501s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:59.731126869 
+0000 UTC m=+166.643343927" watchObservedRunningTime="2026-01-28 12:48:59.7429501 +0000 UTC m=+166.655167138" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.815195 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.820888 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.320864439 +0000 UTC m=+167.233081607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.868796 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rtg68" podStartSLOduration=138.868778201 podStartE2EDuration="2m18.868778201s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:59.839866406 +0000 UTC m=+166.752083444" watchObservedRunningTime="2026-01-28 12:48:59.868778201 +0000 UTC m=+166.780995239" Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.916340 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:48:59 crc kubenswrapper[4848]: E0128 12:48:59.916948 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.41692978 +0000 UTC m=+167.329146818 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:48:59 crc kubenswrapper[4848]: I0128 12:48:59.960927 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6p6sv" podStartSLOduration=138.960900226 podStartE2EDuration="2m18.960900226s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:59.958631384 +0000 UTC m=+166.870848442" watchObservedRunningTime="2026-01-28 12:48:59.960900226 +0000 UTC m=+166.873117254" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.020883 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.021363 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.52134386 +0000 UTC m=+167.433560898 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.054103 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-9k5dc" podStartSLOduration=10.054077309 podStartE2EDuration="10.054077309s" podCreationTimestamp="2026-01-28 12:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:00.006197827 +0000 UTC m=+166.918414865" watchObservedRunningTime="2026-01-28 12:49:00.054077309 +0000 UTC m=+166.966294347" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.122515 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.123125 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.623053785 +0000 UTC m=+167.535270823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.123497 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.124062 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.624041671 +0000 UTC m=+167.536258709 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.133298 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.146128 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.135756 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2qm9g" podStartSLOduration=139.135723529 podStartE2EDuration="2m19.135723529s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:00.133323064 +0000 UTC m=+167.045540102" watchObservedRunningTime="2026-01-28 12:49:00.135723529 +0000 UTC m=+167.047940567" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.157578 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.157642 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.216345 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.225339 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.225799 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e74759a3-551c-4681-9cef-1478d3806c4c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.225888 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e74759a3-551c-4681-9cef-1478d3806c4c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.226062 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 12:49:00.726037355 +0000 UTC m=+167.638254393 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.334247 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e74759a3-551c-4681-9cef-1478d3806c4c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.334413 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e74759a3-551c-4681-9cef-1478d3806c4c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.334463 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.334678 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e74759a3-551c-4681-9cef-1478d3806c4c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.335024 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.835003358 +0000 UTC m=+167.747220386 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.349321 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:49:00 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld Jan 28 12:49:00 crc kubenswrapper[4848]: [+]process-running ok Jan 28 12:49:00 crc kubenswrapper[4848]: healthz check failed Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.349467 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.407917 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e74759a3-551c-4681-9cef-1478d3806c4c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.436209 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.436788 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:00.936761574 +0000 UTC m=+167.848978612 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.444803 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.454068 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-84b8w" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.538200 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.540066 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.541926 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.041898721 +0000 UTC m=+167.954115759 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.641995 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.643308 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.143275798 +0000 UTC m=+168.055492836 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.675749 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5dbnv"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.675812 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5dbnv"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.695863 4848 patch_prober.go:28] interesting pod/console-f9d7485db-5dbnv container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.5:8443/health\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.695969 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5dbnv" podUID="c3bd5c0e-2656-4237-a9ab-e4de84101595" containerName="console" probeResult="failure" output="Get \"https://10.217.0.5:8443/health\": dial tcp 10.217.0.5:8443: connect: connection refused"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.718666 4848 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-tmwll container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.718747 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll" podUID="f0bcbb8b-0e11-45fc-a632-2d869e47c651" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.733344 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c024f265-eb17-4ea2-82bf-6b5de7fd7b15","Type":"ContainerStarted","Data":"3c9b665fe2e3527cdaf1059718d0ef5d9ce4f31af7c4f9757a18d154a22d6ee6"}
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.742872 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" event={"ID":"265f5d9c-2988-4a0b-9353-b426422d4c48","Type":"ContainerStarted","Data":"cd6798e61c754a5ea2b1245ccc430c6e77fd09590216d1adad33254f04219ecb"}
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.744711 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.748396 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.248376065 +0000 UTC m=+168.160593103 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.752718 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" event={"ID":"67e8427e-1a00-4ee0-a364-badb08b1cd8d","Type":"ContainerStarted","Data":"de274c438555c031623a81c5f233bd1d91457ccaedd997fe8e9b6eaff429c0ab"}
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.755653 4848 patch_prober.go:28] interesting pod/downloads-7954f5f757-jltf4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body=
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.755713 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-jltf4" podUID="db02dfdf-9c10-4e70-80e6-29385127d7d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.761229 4848 patch_prober.go:28] interesting pod/downloads-7954f5f757-jltf4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body=
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.761343 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jltf4" podUID="db02dfdf-9c10-4e70-80e6-29385127d7d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.766561 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.76653863 podStartE2EDuration="3.76653863s" podCreationTimestamp="2026-01-28 12:48:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:00.762781527 +0000 UTC m=+167.674998565" watchObservedRunningTime="2026-01-28 12:49:00.76653863 +0000 UTC m=+167.678755668"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.779950 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-hshc8"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.792845 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" podStartSLOduration=139.792816534 podStartE2EDuration="2m19.792816534s" podCreationTimestamp="2026-01-28 12:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:00.789301638 +0000 UTC m=+167.701518676" watchObservedRunningTime="2026-01-28 12:49:00.792816534 +0000 UTC m=+167.705033572"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.845866 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.847901 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.347857 +0000 UTC m=+168.260074208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.930057 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.945617 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tmwll"
Jan 28 12:49:00 crc kubenswrapper[4848]: I0128 12:49:00.951218 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:00 crc kubenswrapper[4848]: E0128 12:49:00.952066 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.452028522 +0000 UTC m=+168.364245570 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.054063 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.056166 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.556131112 +0000 UTC m=+168.468348150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.156548 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.157066 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.657045376 +0000 UTC m=+168.569262424 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.257782 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.258070 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.758019591 +0000 UTC m=+168.670236639 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.258559 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.259084 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.759070749 +0000 UTC m=+168.671287797 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.288802 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.335235 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:49:01 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld
Jan 28 12:49:01 crc kubenswrapper[4848]: [+]process-running ok
Jan 28 12:49:01 crc kubenswrapper[4848]: healthz check failed
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.335340 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.360042 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.360596 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.86056796 +0000 UTC m=+168.772784998 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.461967 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.462640 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:01.962609433 +0000 UTC m=+168.874826471 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.562878 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.572248 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.072212203 +0000 UTC m=+168.984429241 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.673978 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.674592 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.174566146 +0000 UTC m=+169.086783184 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.759620 4848 generic.go:334] "Generic (PLEG): container finished" podID="c024f265-eb17-4ea2-82bf-6b5de7fd7b15" containerID="3c9b665fe2e3527cdaf1059718d0ef5d9ce4f31af7c4f9757a18d154a22d6ee6" exitCode=0
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.759742 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c024f265-eb17-4ea2-82bf-6b5de7fd7b15","Type":"ContainerDied","Data":"3c9b665fe2e3527cdaf1059718d0ef5d9ce4f31af7c4f9757a18d154a22d6ee6"}
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.761854 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e74759a3-551c-4681-9cef-1478d3806c4c","Type":"ContainerStarted","Data":"549504f76d85febcc1bc929b6ae426cd51cfcee28d728e8cdfa929609dc7d138"}
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.776085 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.776401 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.276353313 +0000 UTC m=+169.188570351 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.776621 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.777044 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.277034942 +0000 UTC m=+169.189252160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.877784 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.877914 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.377888143 +0000 UTC m=+169.290105181 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.878170 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.880011 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.380000681 +0000 UTC m=+169.292217719 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:01 crc kubenswrapper[4848]: I0128 12:49:01.979990 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:01 crc kubenswrapper[4848]: E0128 12:49:01.980396 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.48037933 +0000 UTC m=+169.392596368 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.082441 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.083062 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.583029452 +0000 UTC m=+169.495246490 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.186456 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.186674 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.686642938 +0000 UTC m=+169.598859976 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.186740 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.187126 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.687118031 +0000 UTC m=+169.599335059 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.288224 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.288643 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.78860849 +0000 UTC m=+169.700825528 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.288820 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.289310 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.789298719 +0000 UTC m=+169.701515757 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.343653 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:49:02 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld
Jan 28 12:49:02 crc kubenswrapper[4848]: [+]process-running ok
Jan 28 12:49:02 crc kubenswrapper[4848]: healthz check failed
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.343749 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.387898 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z884b"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.389326 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.390017 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.390166 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.89013319 +0000 UTC m=+169.802350228 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.390494 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.390958 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.890946692 +0000 UTC m=+169.803163730 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.397860 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.450933 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z884b"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.491308 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.491494 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqvdc\" (UniqueName: \"kubernetes.io/projected/af50828a-cf61-481c-98c3-fb3e7d8de01a-kube-api-access-tqvdc\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.491569 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.991529107 +0000 UTC m=+169.903746145 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.491633 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.491755 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-catalog-content\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.491930 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-utilities\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.492247 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:02.992215476 +0000 UTC m=+169.904432514 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.573068 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d8bpv"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.574711 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.577774 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.587764 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d8bpv"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.619998 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.620331 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.120228926 +0000 UTC m=+170.032445964 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.620654 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.620776 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-catalog-content\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.620891 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-utilities\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.620995 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqvdc\" (UniqueName: \"kubernetes.io/projected/af50828a-cf61-481c-98c3-fb3e7d8de01a-kube-api-access-tqvdc\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.621140 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.121132231 +0000 UTC m=+170.033349269 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.621783 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-utilities\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.621846 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-catalog-content\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.651181 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqvdc\" (UniqueName: \"kubernetes.io/projected/af50828a-cf61-481c-98c3-fb3e7d8de01a-kube-api-access-tqvdc\") pod \"certified-operators-z884b\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.707979 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.725435 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.725606 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.2255805 +0000 UTC m=+170.137797538 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.725659 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.725782 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-catalog-content\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.725850 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x775h\" (UniqueName: \"kubernetes.io/projected/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-kube-api-access-x775h\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.725895 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-utilities\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.726204 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.226197607 +0000 UTC m=+170.138414645 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.771581 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-np68v"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.772700 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.778979 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e74759a3-551c-4681-9cef-1478d3806c4c","Type":"ContainerStarted","Data":"20703429b540333edbd7479be64d392fe3c9d450aeece2b3e37dc25eb4bd2914"}
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.783047 4848 generic.go:334] "Generic (PLEG): container finished" podID="a78c59e1-105e-4581-a0bb-27c1d78dbdee" containerID="054562811f8e2b39573262f9893cce064e89de0b153147ea7ba0a362026b02be" exitCode=0
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.783262 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" event={"ID":"a78c59e1-105e-4581-a0bb-27c1d78dbdee","Type":"ContainerDied","Data":"054562811f8e2b39573262f9893cce064e89de0b153147ea7ba0a362026b02be"}
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.787226 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-np68v"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.788205 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" event={"ID":"67e8427e-1a00-4ee0-a364-badb08b1cd8d","Type":"ContainerStarted","Data":"e44ccec4def77bf0ed541013ca73ddb46fd606c2f4c6dac9c6107c513378ce18"}
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.827286 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.827557 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-catalog-content\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.827654 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x775h\" (UniqueName: \"kubernetes.io/projected/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-kube-api-access-x775h\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.827702 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-utilities\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.828333 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-utilities\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.828443 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.328415506 +0000 UTC m=+170.240632534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.828682 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-catalog-content\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.864322 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.864286941 podStartE2EDuration="2.864286941s" podCreationTimestamp="2026-01-28 12:49:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:02.830081111 +0000 UTC m=+169.742298149" watchObservedRunningTime="2026-01-28 12:49:02.864286941 +0000 UTC m=+169.776503999"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.891070 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x775h\" (UniqueName: \"kubernetes.io/projected/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-kube-api-access-x775h\") pod \"community-operators-d8bpv\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.924903 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.933180 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-catalog-content\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.933278 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-utilities\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.933322 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.933380 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w28lg\" (UniqueName: \"kubernetes.io/projected/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-kube-api-access-w28lg\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:02 crc kubenswrapper[4848]: E0128 12:49:02.934572 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.434543782 +0000 UTC m=+170.346760820 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.983161 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-27vnd"]
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.984955 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:49:02 crc kubenswrapper[4848]: I0128 12:49:02.996110 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-27vnd"]
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.031643 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.034353 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.034512 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.534489448 +0000 UTC m=+170.446706486 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.034666 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-utilities\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.034718 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.034765 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.034800 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w28lg\" (UniqueName: \"kubernetes.io/projected/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-kube-api-access-w28lg\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.034897 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-catalog-content\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.035172 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-utilities\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.035696 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.535677741 +0000 UTC m=+170.447894769 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.037535 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-catalog-content\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.043201 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d447736-dd38-45b5-be15-2380dc55ad3d-metrics-certs\") pod \"network-metrics-daemon-wqtnc\" (UID: \"8d447736-dd38-45b5-be15-2380dc55ad3d\") " pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.063295 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w28lg\" (UniqueName: \"kubernetes.io/projected/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-kube-api-access-w28lg\") pod \"certified-operators-np68v\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") " pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.076456 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wqtnc"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.097200 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z884b"]
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.098130 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:49:03 crc kubenswrapper[4848]: W0128 12:49:03.111304 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf50828a_cf61_481c_98c3_fb3e7d8de01a.slice/crio-c85cbe970a5c1d90038ccb01267308e154eaa8ed7ac719a85794caf7f4f99ee8 WatchSource:0}: Error finding container c85cbe970a5c1d90038ccb01267308e154eaa8ed7ac719a85794caf7f4f99ee8: Status 404 returned error can't find the container with id c85cbe970a5c1d90038ccb01267308e154eaa8ed7ac719a85794caf7f4f99ee8
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.147829 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kube-api-access\") pod \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") "
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.148022 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.148105 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kubelet-dir\") pod \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\" (UID: \"c024f265-eb17-4ea2-82bf-6b5de7fd7b15\") "
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.148296 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-catalog-content\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.148322 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f998s\" (UniqueName: \"kubernetes.io/projected/5f838c7f-4c63-4856-8991-ae814400975f-kube-api-access-f998s\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.148367 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-utilities\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.149338 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c024f265-eb17-4ea2-82bf-6b5de7fd7b15" (UID: "c024f265-eb17-4ea2-82bf-6b5de7fd7b15"). InnerVolumeSpecName "kubelet-dir".
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.149465 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.649443194 +0000 UTC m=+170.561660232 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.161674 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c024f265-eb17-4ea2-82bf-6b5de7fd7b15" (UID: "c024f265-eb17-4ea2-82bf-6b5de7fd7b15"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.250803 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-utilities\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.251360 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.251402 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-catalog-content\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.251426 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f998s\" (UniqueName: \"kubernetes.io/projected/5f838c7f-4c63-4856-8991-ae814400975f-kube-api-access-f998s\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.251503 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.251516 4848 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c024f265-eb17-4ea2-82bf-6b5de7fd7b15-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:03 crc 
kubenswrapper[4848]: E0128 12:49:03.252758 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.752743092 +0000 UTC m=+170.664960130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.253034 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-catalog-content\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.253140 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-utilities\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.265955 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d8bpv"] Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.297354 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f998s\" (UniqueName: \"kubernetes.io/projected/5f838c7f-4c63-4856-8991-ae814400975f-kube-api-access-f998s\") pod \"community-operators-27vnd\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.314799 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.331417 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.357131 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:49:03 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld Jan 28 12:49:03 crc kubenswrapper[4848]: [+]process-running ok Jan 28 12:49:03 crc kubenswrapper[4848]: healthz check failed Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.357212 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.358603 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.358729 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.858678773 +0000 UTC m=+170.770895821 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.359735 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.859724881 +0000 UTC m=+170.771941919 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.360907 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.381448 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-np68v"] Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.403085 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xqtsj" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.410335 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.463852 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.464657 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:03.964627613 +0000 UTC m=+170.876844831 (durationBeforeRetry 500ms). 
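Every MountDevice/TearDown failure in this stretch bottoms out in the same condition: the CSI client factory cannot find kubevirt.io.hostpath-provisioner among the registered drivers. Conceptually that is a lookup in an in-memory name-to-endpoint map that fails fast while the plugin has not yet announced itself; a sketch under that assumption (the `driverRegistry` type is hypothetical, not the kubelet's actual csi_plugin.go structure):

```go
package main

import (
	"fmt"
	"sync"
)

// driverRegistry maps a CSI driver name to its unix socket endpoint,
// loosely modelling the kubelet's list of registered CSI drivers.
type driverRegistry struct {
	mu        sync.RWMutex
	endpoints map[string]string
}

func (r *driverRegistry) register(name, endpoint string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.endpoints[name] = endpoint
}

// newClient fails exactly the way the log does while the plugin
// has not yet registered its socket.
func (r *driverRegistry) newClient(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.endpoints[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := &driverRegistry{endpoints: map[string]string{}}

	if _, err := reg.newClient("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("before registration:", err) // the recurring failure in the log
	}

	reg.register("kubevirt.io.hostpath-provisioner",
		"/var/lib/kubelet/plugins/csi-hostpath/csi.sock")

	ep, _ := reg.newClient("kubevirt.io.hostpath-provisioner")
	fmt.Println("after registration:", ep) // mounts start succeeding
}
```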
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.478345 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-wqtnc"] Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.503147 4848 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.520836 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.520878 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.566160 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.573978 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.073948095 +0000 UTC m=+170.986165133 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.681942 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.684330 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.184304505 +0000 UTC m=+171.096521543 (durationBeforeRetry 500ms). 
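The plugin_watcher record above ("Adding socket path or updating timestamp to desired state cache") is the other half of the story: the kubelet watches /var/lib/kubelet/plugins_registry, and a CSI driver registers itself simply by creating a socket there. A minimal watch in that spirit using the fsnotify library; illustrative only, since the real plugin watcher also performs a gRPC GetInfo handshake over the socket before accepting the plugin:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// The kubelet's equivalent directory is /var/lib/kubelet/plugins_registry.
	if err := watcher.Add("/var/lib/kubelet/plugins_registry"); err != nil {
		log.Fatal(err)
	}

	// Runs until interrupted: each new *.sock file is a driver announcing itself.
	for {
		select {
		case ev := <-watcher.Events:
			if ev.Op&fsnotify.Create == fsnotify.Create && filepath.Ext(ev.Name) == ".sock" {
				log.Printf("adding socket path to desired state cache: %s", ev.Name)
			}
		case err := <-watcher.Errors:
			log.Println("watch error:", err)
		}
	}
}
```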
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.689271 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-27vnd"] Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.785105 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.785611 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.285588169 +0000 UTC m=+171.197805207 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.806113 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.821046 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27vnd" event={"ID":"5f838c7f-4c63-4856-8991-ae814400975f","Type":"ContainerStarted","Data":"9ec3cd775da0b76a347c7b91a2211c35dcbc92d80b5b86d3737eec486f370ebf"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.825580 4848 generic.go:334] "Generic (PLEG): container finished" podID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerID="02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed" exitCode=0 Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.825982 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8bpv" event={"ID":"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99","Type":"ContainerDied","Data":"02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.826058 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8bpv" event={"ID":"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99","Type":"ContainerStarted","Data":"d78995d884c581c197f1a05b71309b47fae97ce572e5ba39862876ffe979deb4"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.829802 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.831906 4848 
generic.go:334] "Generic (PLEG): container finished" podID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerID="29d700b453963d7d4cd7fe6c6e92cb2deef74ac0cd799990c76a2f17846f6443" exitCode=0 Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.832004 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerDied","Data":"29d700b453963d7d4cd7fe6c6e92cb2deef74ac0cd799990c76a2f17846f6443"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.832051 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerStarted","Data":"8a2973da92b276012b598e10e698795478e0e49dc9f5bb4eb3ca2359c7e3b906"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.854070 4848 generic.go:334] "Generic (PLEG): container finished" podID="e74759a3-551c-4681-9cef-1478d3806c4c" containerID="20703429b540333edbd7479be64d392fe3c9d450aeece2b3e37dc25eb4bd2914" exitCode=0 Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.856145 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e74759a3-551c-4681-9cef-1478d3806c4c","Type":"ContainerDied","Data":"20703429b540333edbd7479be64d392fe3c9d450aeece2b3e37dc25eb4bd2914"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.866864 4848 generic.go:334] "Generic (PLEG): container finished" podID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerID="934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7" exitCode=0 Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.867442 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerDied","Data":"934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.867518 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerStarted","Data":"c85cbe970a5c1d90038ccb01267308e154eaa8ed7ac719a85794caf7f4f99ee8"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.874617 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" event={"ID":"8d447736-dd38-45b5-be15-2380dc55ad3d","Type":"ContainerStarted","Data":"faf03436ad53c07e0f5e542f9a3093e16d7aa93ca5a79fddd68738dda0b5c685"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.889634 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.891221 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.391172499 +0000 UTC m=+171.303389537 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.917862 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c024f265-eb17-4ea2-82bf-6b5de7fd7b15","Type":"ContainerDied","Data":"238ae41652a41bd5544557c2e8ef257c2b590eda7ff09f0cede23d214bdf9d6a"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.917936 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="238ae41652a41bd5544557c2e8ef257c2b590eda7ff09f0cede23d214bdf9d6a" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.918073 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.926659 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" event={"ID":"67e8427e-1a00-4ee0-a364-badb08b1cd8d","Type":"ContainerStarted","Data":"97ae72c171cd1e2a0a8a6bef8b560773b8563d6c995c7a4dedb8641b543395b7"} Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.938019 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pfwwr" Jan 28 12:49:03 crc kubenswrapper[4848]: I0128 12:49:03.992624 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:03 crc kubenswrapper[4848]: E0128 12:49:03.993165 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.493143321 +0000 UTC m=+171.405360359 (durationBeforeRetry 500ms). 
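Note how the same PVC carries two independent retry streams: MountDevice for the incoming image-registry pod is keyed "{volumeName:... podName: nodeName:}" with an empty podName, while TearDown for the departing pod 8f668bae is keyed with that pod's UID. Reading the key text literally, pending operations are deduplicated per volume/pod pair, so the mount and the unmount back off without blocking each other. A sketch of that keying; the `operationKey` type is an assumption drawn from the error text, not the kubelet's implementation:

```go
package main

import "fmt"

// operationKey mirrors the "{volumeName:... podName:... nodeName:}" text in
// the nestedpendingoperations errors: volume-scoped operations (MountDevice)
// leave podName empty, pod-scoped ones (TearDown) fill it in.
type operationKey struct {
	volumeName string
	podName    string
	nodeName   string
}

func (k operationKey) String() string {
	return fmt.Sprintf("{volumeName:%s podName:%s nodeName:%s}", k.volumeName, k.podName, k.nodeName)
}

func main() {
	vol := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"

	// Both operations can be pending at once because their keys differ.
	pending := map[operationKey]string{
		{volumeName: vol}: "MountDevice for image-registry-697d97f7c8-fsgh8",
		{volumeName: vol, podName: "8f668bae-612b-4b75-9490-919e737c6a3b"}: "TearDown for the deleted pod",
	}
	for k, op := range pending {
		fmt.Println(k, "->", op)
	}
}
```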
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.094366 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:04 crc kubenswrapper[4848]: E0128 12:49:04.094503 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.594471737 +0000 UTC m=+171.506688785 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.094584 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:04 crc kubenswrapper[4848]: E0128 12:49:04.095319 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.595292279 +0000 UTC m=+171.507509477 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.199118 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:04 crc kubenswrapper[4848]: E0128 12:49:04.199411 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.699370659 +0000 UTC m=+171.611587697 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.200044 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:04 crc kubenswrapper[4848]: E0128 12:49:04.200668 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:49:04.700647823 +0000 UTC m=+171.612864861 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fsgh8" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.244216 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.288500 4848 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-28T12:49:03.503521961Z","Handler":null,"Name":""} Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.298668 4848 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.298728 4848 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.302088 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.312087 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.347794 4848 patch_prober.go:28] interesting pod/router-default-5444994796-djhmv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:49:04 crc kubenswrapper[4848]: [-]has-synced failed: reason withheld Jan 28 12:49:04 crc kubenswrapper[4848]: [+]process-running ok Jan 28 12:49:04 crc kubenswrapper[4848]: healthz check failed Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.347862 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-djhmv" podUID="4dd1be3c-1904-4ce3-9a1b-84a4f2315b96" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.403696 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjj2r\" (UniqueName: \"kubernetes.io/projected/a78c59e1-105e-4581-a0bb-27c1d78dbdee-kube-api-access-wjj2r\") pod \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.403740 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a78c59e1-105e-4581-a0bb-27c1d78dbdee-config-volume\") pod \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.403761 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/a78c59e1-105e-4581-a0bb-27c1d78dbdee-secret-volume\") pod \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\" (UID: \"a78c59e1-105e-4581-a0bb-27c1d78dbdee\") " Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.403898 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.407131 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a78c59e1-105e-4581-a0bb-27c1d78dbdee-config-volume" (OuterVolumeSpecName: "config-volume") pod "a78c59e1-105e-4581-a0bb-27c1d78dbdee" (UID: "a78c59e1-105e-4581-a0bb-27c1d78dbdee"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.411140 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a78c59e1-105e-4581-a0bb-27c1d78dbdee-kube-api-access-wjj2r" (OuterVolumeSpecName: "kube-api-access-wjj2r") pod "a78c59e1-105e-4581-a0bb-27c1d78dbdee" (UID: "a78c59e1-105e-4581-a0bb-27c1d78dbdee"). InnerVolumeSpecName "kube-api-access-wjj2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.425411 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a78c59e1-105e-4581-a0bb-27c1d78dbdee-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a78c59e1-105e-4581-a0bb-27c1d78dbdee" (UID: "a78c59e1-105e-4581-a0bb-27c1d78dbdee"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.443861 4848 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.443920 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.505304 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjj2r\" (UniqueName: \"kubernetes.io/projected/a78c59e1-105e-4581-a0bb-27c1d78dbdee-kube-api-access-wjj2r\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.505343 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a78c59e1-105e-4581-a0bb-27c1d78dbdee-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.505364 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a78c59e1-105e-4581-a0bb-27c1d78dbdee-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.613490 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b7gdr"] Jan 28 12:49:04 crc kubenswrapper[4848]: E0128 12:49:04.613760 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c024f265-eb17-4ea2-82bf-6b5de7fd7b15" containerName="pruner" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.613775 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c024f265-eb17-4ea2-82bf-6b5de7fd7b15" containerName="pruner" Jan 28 12:49:04 crc kubenswrapper[4848]: E0128 12:49:04.613794 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a78c59e1-105e-4581-a0bb-27c1d78dbdee" containerName="collect-profiles" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.613802 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a78c59e1-105e-4581-a0bb-27c1d78dbdee" containerName="collect-profiles" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.613932 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a78c59e1-105e-4581-a0bb-27c1d78dbdee" containerName="collect-profiles" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.613956 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c024f265-eb17-4ea2-82bf-6b5de7fd7b15" containerName="pruner" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.614790 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.618581 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.619868 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fsgh8\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") " pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.642386 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7gdr"] Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.711626 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhj5x\" (UniqueName: \"kubernetes.io/projected/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-kube-api-access-dhj5x\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.712018 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-utilities\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.712061 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-catalog-content\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.853995 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhj5x\" (UniqueName: \"kubernetes.io/projected/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-kube-api-access-dhj5x\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.854093 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-utilities\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.854141 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-catalog-content\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.854673 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-utilities\") pod 
\"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.854742 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-catalog-content\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.881607 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.930429 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.930754 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhj5x\" (UniqueName: \"kubernetes.io/projected/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-kube-api-access-dhj5x\") pod \"redhat-marketplace-b7gdr\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:04 crc kubenswrapper[4848]: I0128 12:49:04.936592 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:04.999039 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-knlkg"] Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.004659 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.008326 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp" event={"ID":"a78c59e1-105e-4581-a0bb-27c1d78dbdee","Type":"ContainerDied","Data":"cc8cfa51a67c43940d62787c4a47311afc1ee59b10654f52abca1ce8134ca3f2"} Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.008375 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc8cfa51a67c43940d62787c4a47311afc1ee59b10654f52abca1ce8134ca3f2" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.008462 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.015896 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlkg"] Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.036827 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-wgv95" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.049163 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" event={"ID":"67e8427e-1a00-4ee0-a364-badb08b1cd8d","Type":"ContainerStarted","Data":"21d8f53f643f5b7b66cea5d45a1a4294656c4b0ebaf8cba1a303bd1791281587"} Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.058410 4848 generic.go:334] "Generic (PLEG): container finished" podID="5f838c7f-4c63-4856-8991-ae814400975f" containerID="94746d5da669fa5c01764e32a6f0f3943f912b71c19eb251ad57648bee68a9d5" exitCode=0 Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.058520 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27vnd" event={"ID":"5f838c7f-4c63-4856-8991-ae814400975f","Type":"ContainerDied","Data":"94746d5da669fa5c01764e32a6f0f3943f912b71c19eb251ad57648bee68a9d5"} Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.071507 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" event={"ID":"8d447736-dd38-45b5-be15-2380dc55ad3d","Type":"ContainerStarted","Data":"ba1607e0a11a29d389faa423ab46d6c71be9f60d39650d6bc727471b9ec6b507"} Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.071593 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wqtnc" event={"ID":"8d447736-dd38-45b5-be15-2380dc55ad3d","Type":"ContainerStarted","Data":"22f548f11344997f5fab0c2fc1e8c0062e64a8f33498f11ada5a7b926924212c"} Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.108610 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-dkwtc" podStartSLOduration=15.108583528 podStartE2EDuration="15.108583528s" podCreationTimestamp="2026-01-28 12:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:05.10718996 +0000 UTC m=+172.019407018" watchObservedRunningTime="2026-01-28 12:49:05.108583528 +0000 UTC m=+172.020800566" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.159153 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-utilities\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.159207 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dldp2\" (UniqueName: \"kubernetes.io/projected/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-kube-api-access-dldp2\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.159268 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-catalog-content\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.213623 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-wqtnc" podStartSLOduration=145.213582502 podStartE2EDuration="2m25.213582502s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:05.20284162 +0000 UTC m=+172.115058658" watchObservedRunningTime="2026-01-28 12:49:05.213582502 +0000 UTC m=+172.125799540" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.260023 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-catalog-content\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.260185 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-utilities\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.260213 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dldp2\" (UniqueName: \"kubernetes.io/projected/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-kube-api-access-dldp2\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.263369 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-catalog-content\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.263756 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-utilities\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.308687 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dldp2\" (UniqueName: \"kubernetes.io/projected/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-kube-api-access-dldp2\") pod \"redhat-marketplace-knlkg\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.335821 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.343560 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-djhmv" Jan 28 
12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.363170 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.595525 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c6gkd"] Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.597679 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.606633 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.615890 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6gkd"] Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.689597 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.693310 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-catalog-content\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.693418 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-utilities\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.693447 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdnrr\" (UniqueName: \"kubernetes.io/projected/26dc23f3-cba2-4cb1-9cf6-7402896c876d-kube-api-access-kdnrr\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.783945 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7gdr"] Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.794581 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e74759a3-551c-4681-9cef-1478d3806c4c-kubelet-dir\") pod \"e74759a3-551c-4681-9cef-1478d3806c4c\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.794858 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e74759a3-551c-4681-9cef-1478d3806c4c-kube-api-access\") pod \"e74759a3-551c-4681-9cef-1478d3806c4c\" (UID: \"e74759a3-551c-4681-9cef-1478d3806c4c\") " Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.794851 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e74759a3-551c-4681-9cef-1478d3806c4c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e74759a3-551c-4681-9cef-1478d3806c4c" (UID: "e74759a3-551c-4681-9cef-1478d3806c4c"). 
InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.795179 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-utilities\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.795237 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdnrr\" (UniqueName: \"kubernetes.io/projected/26dc23f3-cba2-4cb1-9cf6-7402896c876d-kube-api-access-kdnrr\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.795350 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-catalog-content\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.795427 4848 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e74759a3-551c-4681-9cef-1478d3806c4c-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.795673 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-utilities\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.795921 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-catalog-content\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.818972 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdnrr\" (UniqueName: \"kubernetes.io/projected/26dc23f3-cba2-4cb1-9cf6-7402896c876d-kube-api-access-kdnrr\") pod \"redhat-operators-c6gkd\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.819181 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e74759a3-551c-4681-9cef-1478d3806c4c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e74759a3-551c-4681-9cef-1478d3806c4c" (UID: "e74759a3-551c-4681-9cef-1478d3806c4c"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.833871 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fsgh8"] Jan 28 12:49:05 crc kubenswrapper[4848]: W0128 12:49:05.847047 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e357b5a_bdd4_4681_a70d_afaf1275f5e4.slice/crio-2ba5cc61fe4efbb1fa0e66571e77e9c4d39fbc64e9b080f096dc922c94eea77c WatchSource:0}: Error finding container 2ba5cc61fe4efbb1fa0e66571e77e9c4d39fbc64e9b080f096dc922c94eea77c: Status 404 returned error can't find the container with id 2ba5cc61fe4efbb1fa0e66571e77e9c4d39fbc64e9b080f096dc922c94eea77c Jan 28 12:49:05 crc kubenswrapper[4848]: W0128 12:49:05.882346 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1929eb16_0432_46a9_871d_3a2d75f37d7a.slice/crio-2fef46230d113794d7047a92200c68fff286525a489507c8bd79057d5a24bf69 WatchSource:0}: Error finding container 2fef46230d113794d7047a92200c68fff286525a489507c8bd79057d5a24bf69: Status 404 returned error can't find the container with id 2fef46230d113794d7047a92200c68fff286525a489507c8bd79057d5a24bf69 Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.897880 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e74759a3-551c-4681-9cef-1478d3806c4c-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.905826 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlkg"] Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.942638 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.977216 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4zt82"] Jan 28 12:49:05 crc kubenswrapper[4848]: E0128 12:49:05.980020 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e74759a3-551c-4681-9cef-1478d3806c4c" containerName="pruner" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.980050 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="e74759a3-551c-4681-9cef-1478d3806c4c" containerName="pruner" Jan 28 12:49:05 crc kubenswrapper[4848]: I0128 12:49:05.980377 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="e74759a3-551c-4681-9cef-1478d3806c4c" containerName="pruner" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.043442 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.043994 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4zt82"] Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.203260 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerStarted","Data":"44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3"} Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.203362 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerStarted","Data":"2ba5cc61fe4efbb1fa0e66571e77e9c4d39fbc64e9b080f096dc922c94eea77c"} Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.220844 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-catalog-content\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.220994 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fnch\" (UniqueName: \"kubernetes.io/projected/ff062c59-745a-4664-b98f-f2fb669edf1f-kube-api-access-5fnch\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.221035 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-utilities\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.226304 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" event={"ID":"1929eb16-0432-46a9-871d-3a2d75f37d7a","Type":"ContainerStarted","Data":"2fef46230d113794d7047a92200c68fff286525a489507c8bd79057d5a24bf69"} Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.251278 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e74759a3-551c-4681-9cef-1478d3806c4c","Type":"ContainerDied","Data":"549504f76d85febcc1bc929b6ae426cd51cfcee28d728e8cdfa929609dc7d138"} Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.251326 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="549504f76d85febcc1bc929b6ae426cd51cfcee28d728e8cdfa929609dc7d138" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.251365 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.253435 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlkg" event={"ID":"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4","Type":"ContainerStarted","Data":"aa4f3c3fc198112feaf9f6ce3e631845b908b45dc2f793543521e26386dafb63"} Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.326752 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fnch\" (UniqueName: \"kubernetes.io/projected/ff062c59-745a-4664-b98f-f2fb669edf1f-kube-api-access-5fnch\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.327650 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-utilities\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.327708 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-catalog-content\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.328315 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-catalog-content\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.328336 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-utilities\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.328601 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6gkd"] Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.361660 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fnch\" (UniqueName: \"kubernetes.io/projected/ff062c59-745a-4664-b98f-f2fb669edf1f-kube-api-access-5fnch\") pod \"redhat-operators-4zt82\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: W0128 12:49:06.435002 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26dc23f3_cba2_4cb1_9cf6_7402896c876d.slice/crio-a8860644a5202775c4a01ab8f5fd3e3c76084a452ca561d9738292f27b18b610 WatchSource:0}: Error finding container a8860644a5202775c4a01ab8f5fd3e3c76084a452ca561d9738292f27b18b610: Status 404 returned error can't find the container with id a8860644a5202775c4a01ab8f5fd3e3c76084a452ca561d9738292f27b18b610 Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.526414 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:49:06 crc kubenswrapper[4848]: I0128 12:49:06.978874 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4zt82"] Jan 28 12:49:07 crc kubenswrapper[4848]: W0128 12:49:07.026943 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff062c59_745a_4664_b98f_f2fb669edf1f.slice/crio-f7db86905e411f4142250af532e0914a5a18894d8d3ae56accaf10665b4c1bd7 WatchSource:0}: Error finding container f7db86905e411f4142250af532e0914a5a18894d8d3ae56accaf10665b4c1bd7: Status 404 returned error can't find the container with id f7db86905e411f4142250af532e0914a5a18894d8d3ae56accaf10665b4c1bd7 Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.280745 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" event={"ID":"1929eb16-0432-46a9-871d-3a2d75f37d7a","Type":"ContainerStarted","Data":"33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e"} Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.281726 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.287150 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerStarted","Data":"f7db86905e411f4142250af532e0914a5a18894d8d3ae56accaf10665b4c1bd7"} Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.291559 4848 generic.go:334] "Generic (PLEG): container finished" podID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerID="dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29" exitCode=0 Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.291721 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlkg" event={"ID":"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4","Type":"ContainerDied","Data":"dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29"} Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.308862 4848 generic.go:334] "Generic (PLEG): container finished" podID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerID="44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3" exitCode=0 Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.308943 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerDied","Data":"44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3"} Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.314521 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" podStartSLOduration=147.314502931 podStartE2EDuration="2m27.314502931s" podCreationTimestamp="2026-01-28 12:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:07.308037125 +0000 UTC m=+174.220254163" watchObservedRunningTime="2026-01-28 12:49:07.314502931 +0000 UTC m=+174.226719969" Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.318564 4848 generic.go:334] "Generic (PLEG): container finished" podID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" 
containerID="0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f" exitCode=0 Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.318613 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerDied","Data":"0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f"} Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.318642 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerStarted","Data":"a8860644a5202775c4a01ab8f5fd3e3c76084a452ca561d9738292f27b18b610"} Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.924408 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:49:07 crc kubenswrapper[4848]: I0128 12:49:07.924467 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:49:08 crc kubenswrapper[4848]: I0128 12:49:08.341190 4848 generic.go:334] "Generic (PLEG): container finished" podID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerID="910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d" exitCode=0 Jan 28 12:49:08 crc kubenswrapper[4848]: I0128 12:49:08.341495 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerDied","Data":"910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d"} Jan 28 12:49:10 crc kubenswrapper[4848]: I0128 12:49:10.680783 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:49:10 crc kubenswrapper[4848]: I0128 12:49:10.685276 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 12:49:10 crc kubenswrapper[4848]: I0128 12:49:10.762679 4848 patch_prober.go:28] interesting pod/downloads-7954f5f757-jltf4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Jan 28 12:49:10 crc kubenswrapper[4848]: I0128 12:49:10.762954 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jltf4" podUID="db02dfdf-9c10-4e70-80e6-29385127d7d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Jan 28 12:49:10 crc kubenswrapper[4848]: I0128 12:49:10.762976 4848 patch_prober.go:28] interesting pod/downloads-7954f5f757-jltf4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Jan 28 12:49:10 crc kubenswrapper[4848]: I0128 12:49:10.763043 4848 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-console/downloads-7954f5f757-jltf4" podUID="db02dfdf-9c10-4e70-80e6-29385127d7d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Jan 28 12:49:14 crc kubenswrapper[4848]: I0128 12:49:14.461655 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zwpd8"] Jan 28 12:49:14 crc kubenswrapper[4848]: I0128 12:49:14.462149 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" containerName="controller-manager" containerID="cri-o://09823452889d46ebb3edea438265776604c65d6a20e04d9b1d5850799c5eaa06" gracePeriod=30 Jan 28 12:49:14 crc kubenswrapper[4848]: I0128 12:49:14.494172 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5"] Jan 28 12:49:14 crc kubenswrapper[4848]: I0128 12:49:14.496698 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" containerID="cri-o://2b7d9c8e4c967e146ab89a27478347f766ededcf1487f8d688ea98a2fce0fa28" gracePeriod=30 Jan 28 12:49:15 crc kubenswrapper[4848]: I0128 12:49:15.433146 4848 generic.go:334] "Generic (PLEG): container finished" podID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerID="2b7d9c8e4c967e146ab89a27478347f766ededcf1487f8d688ea98a2fce0fa28" exitCode=0 Jan 28 12:49:15 crc kubenswrapper[4848]: I0128 12:49:15.433211 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" event={"ID":"6471a57b-f563-440e-9fa7-2c24af8039c9","Type":"ContainerDied","Data":"2b7d9c8e4c967e146ab89a27478347f766ededcf1487f8d688ea98a2fce0fa28"} Jan 28 12:49:16 crc kubenswrapper[4848]: I0128 12:49:16.444414 4848 generic.go:334] "Generic (PLEG): container finished" podID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" containerID="09823452889d46ebb3edea438265776604c65d6a20e04d9b1d5850799c5eaa06" exitCode=0 Jan 28 12:49:16 crc kubenswrapper[4848]: I0128 12:49:16.444483 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" event={"ID":"365c6d6e-80a0-4818-b1dc-093bddc9a5a4","Type":"ContainerDied","Data":"09823452889d46ebb3edea438265776604c65d6a20e04d9b1d5850799c5eaa06"} Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.835869 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.875359 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-74569667d8-tzxgx"] Jan 28 12:49:19 crc kubenswrapper[4848]: E0128 12:49:19.877133 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" containerName="controller-manager" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.877172 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" containerName="controller-manager" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.877389 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" containerName="controller-manager" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.878016 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.882736 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74569667d8-tzxgx"] Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.960740 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-proxy-ca-bundles\") pod \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.961064 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tss6v\" (UniqueName: \"kubernetes.io/projected/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-kube-api-access-tss6v\") pod \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.961148 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-config\") pod \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962048 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "365c6d6e-80a0-4818-b1dc-093bddc9a5a4" (UID: "365c6d6e-80a0-4818-b1dc-093bddc9a5a4"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962164 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-config" (OuterVolumeSpecName: "config") pod "365c6d6e-80a0-4818-b1dc-093bddc9a5a4" (UID: "365c6d6e-80a0-4818-b1dc-093bddc9a5a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962224 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-serving-cert\") pod \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962358 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-client-ca\") pod \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\" (UID: \"365c6d6e-80a0-4818-b1dc-093bddc9a5a4\") " Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962604 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-proxy-ca-bundles\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962729 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqj6b\" (UniqueName: \"kubernetes.io/projected/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-kube-api-access-mqj6b\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962766 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-serving-cert\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962795 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-client-ca\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962864 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-config\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962915 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.962928 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.963410 4848 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-client-ca" (OuterVolumeSpecName: "client-ca") pod "365c6d6e-80a0-4818-b1dc-093bddc9a5a4" (UID: "365c6d6e-80a0-4818-b1dc-093bddc9a5a4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.969071 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "365c6d6e-80a0-4818-b1dc-093bddc9a5a4" (UID: "365c6d6e-80a0-4818-b1dc-093bddc9a5a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4848]: I0128 12:49:19.970023 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-kube-api-access-tss6v" (OuterVolumeSpecName: "kube-api-access-tss6v") pod "365c6d6e-80a0-4818-b1dc-093bddc9a5a4" (UID: "365c6d6e-80a0-4818-b1dc-093bddc9a5a4"). InnerVolumeSpecName "kube-api-access-tss6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.084276 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-proxy-ca-bundles\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.084818 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqj6b\" (UniqueName: \"kubernetes.io/projected/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-kube-api-access-mqj6b\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.084960 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-serving-cert\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.085073 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-client-ca\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.085196 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-config\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.085374 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.085523 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.086840 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tss6v\" (UniqueName: \"kubernetes.io/projected/365c6d6e-80a0-4818-b1dc-093bddc9a5a4-kube-api-access-tss6v\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.086595 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-proxy-ca-bundles\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.086870 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-config\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.086439 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-client-ca\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.089175 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-serving-cert\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.103660 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqj6b\" (UniqueName: \"kubernetes.io/projected/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-kube-api-access-mqj6b\") pod \"controller-manager-74569667d8-tzxgx\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.197122 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.480123 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" event={"ID":"365c6d6e-80a0-4818-b1dc-093bddc9a5a4","Type":"ContainerDied","Data":"5a8903b915dd723dce5b33e2f73d115a6dd5dc0017b5f955aab10852601e810c"} Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.480199 4848 scope.go:117] "RemoveContainer" containerID="09823452889d46ebb3edea438265776604c65d6a20e04d9b1d5850799c5eaa06" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.480431 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zwpd8" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.516721 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zwpd8"] Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.520664 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zwpd8"] Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.761701 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-jltf4" Jan 28 12:49:20 crc kubenswrapper[4848]: I0128 12:49:20.857546 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="365c6d6e-80a0-4818-b1dc-093bddc9a5a4" path="/var/lib/kubelet/pods/365c6d6e-80a0-4818-b1dc-093bddc9a5a4/volumes" Jan 28 12:49:22 crc kubenswrapper[4848]: I0128 12:49:22.603146 4848 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-j4xw5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 12:49:22 crc kubenswrapper[4848]: I0128 12:49:22.603613 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 12:49:24 crc kubenswrapper[4848]: I0128 12:49:24.939709 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" Jan 28 12:49:27 crc kubenswrapper[4848]: I0128 12:49:27.888122 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:49:32 crc kubenswrapper[4848]: I0128 12:49:32.603161 4848 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-j4xw5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 12:49:32 crc kubenswrapper[4848]: I0128 12:49:32.603534 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 12:49:32 crc kubenswrapper[4848]: I0128 12:49:32.999249 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jvwtg" Jan 28 12:49:34 crc kubenswrapper[4848]: I0128 12:49:34.440767 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74569667d8-tzxgx"] Jan 28 12:49:37 crc 
kubenswrapper[4848]: I0128 12:49:37.924968 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:49:37 crc kubenswrapper[4848]: I0128 12:49:37.925616 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.890630 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.891612 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.893994 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.894036 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.895118 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.983089 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:39 crc kubenswrapper[4848]: I0128 12:49:39.983352 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:40 crc kubenswrapper[4848]: I0128 12:49:40.085657 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:40 crc kubenswrapper[4848]: I0128 12:49:40.085713 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:40 crc kubenswrapper[4848]: I0128 12:49:40.085862 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " 
pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:40 crc kubenswrapper[4848]: I0128 12:49:40.290237 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:40 crc kubenswrapper[4848]: I0128 12:49:40.517028 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.091377 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.114605 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz"] Jan 28 12:49:42 crc kubenswrapper[4848]: E0128 12:49:42.114845 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.114861 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.114992 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.115471 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.126438 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz"] Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.214373 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-544kr\" (UniqueName: \"kubernetes.io/projected/6471a57b-f563-440e-9fa7-2c24af8039c9-kube-api-access-544kr\") pod \"6471a57b-f563-440e-9fa7-2c24af8039c9\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.214476 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-config\") pod \"6471a57b-f563-440e-9fa7-2c24af8039c9\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.214505 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6471a57b-f563-440e-9fa7-2c24af8039c9-serving-cert\") pod \"6471a57b-f563-440e-9fa7-2c24af8039c9\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.214741 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-client-ca\") pod \"6471a57b-f563-440e-9fa7-2c24af8039c9\" (UID: \"6471a57b-f563-440e-9fa7-2c24af8039c9\") " Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.215318 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-config\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.215512 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-client-ca" (OuterVolumeSpecName: "client-ca") pod "6471a57b-f563-440e-9fa7-2c24af8039c9" (UID: "6471a57b-f563-440e-9fa7-2c24af8039c9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.215559 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-config" (OuterVolumeSpecName: "config") pod "6471a57b-f563-440e-9fa7-2c24af8039c9" (UID: "6471a57b-f563-440e-9fa7-2c24af8039c9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.215694 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmdzx\" (UniqueName: \"kubernetes.io/projected/921d3a1d-6fb7-4421-9f1f-fba49ff10219-kube-api-access-mmdzx\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.215821 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/921d3a1d-6fb7-4421-9f1f-fba49ff10219-serving-cert\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.215875 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-client-ca\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.216024 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.216042 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6471a57b-f563-440e-9fa7-2c24af8039c9-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.220031 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6471a57b-f563-440e-9fa7-2c24af8039c9-kube-api-access-544kr" (OuterVolumeSpecName: "kube-api-access-544kr") pod "6471a57b-f563-440e-9fa7-2c24af8039c9" (UID: "6471a57b-f563-440e-9fa7-2c24af8039c9"). InnerVolumeSpecName "kube-api-access-544kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.220480 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6471a57b-f563-440e-9fa7-2c24af8039c9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6471a57b-f563-440e-9fa7-2c24af8039c9" (UID: "6471a57b-f563-440e-9fa7-2c24af8039c9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.319011 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-client-ca\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.319133 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-config\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.319377 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmdzx\" (UniqueName: \"kubernetes.io/projected/921d3a1d-6fb7-4421-9f1f-fba49ff10219-kube-api-access-mmdzx\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.319453 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/921d3a1d-6fb7-4421-9f1f-fba49ff10219-serving-cert\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.319947 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-544kr\" (UniqueName: \"kubernetes.io/projected/6471a57b-f563-440e-9fa7-2c24af8039c9-kube-api-access-544kr\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.320020 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6471a57b-f563-440e-9fa7-2c24af8039c9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.326313 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-client-ca\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.326489 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-config\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.327362 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/921d3a1d-6fb7-4421-9f1f-fba49ff10219-serving-cert\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " 
pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.349797 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmdzx\" (UniqueName: \"kubernetes.io/projected/921d3a1d-6fb7-4421-9f1f-fba49ff10219-kube-api-access-mmdzx\") pod \"route-controller-manager-7b88d6b4c9-8k5vz\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: E0128 12:49:42.383766 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 28 12:49:42 crc kubenswrapper[4848]: E0128 12:49:42.383933 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dhj5x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-b7gdr_openshift-marketplace(8e357b5a-bdd4-4681-a70d-afaf1275f5e4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:42 crc kubenswrapper[4848]: E0128 12:49:42.385183 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-b7gdr" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.436611 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.602610 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" event={"ID":"6471a57b-f563-440e-9fa7-2c24af8039c9","Type":"ContainerDied","Data":"b7a7a3a17eae0608187a089c151b018150297b93b1a2ef1f85b7b4abb30617bd"} Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.602821 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.603319 4848 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-j4xw5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": context deadline exceeded" start-of-body= Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.603400 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": context deadline exceeded" Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.649272 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5"] Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.652756 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-j4xw5"] Jan 28 12:49:42 crc kubenswrapper[4848]: I0128 12:49:42.871309 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6471a57b-f563-440e-9fa7-2c24af8039c9" path="/var/lib/kubelet/pods/6471a57b-f563-440e-9fa7-2c24af8039c9/volumes" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.288186 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.289512 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.300433 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.448341 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-kubelet-dir\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.448430 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f754cdc-1dde-4331-842d-824d719b4255-kube-api-access\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.448558 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-var-lock\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.549604 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-kubelet-dir\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.549698 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f754cdc-1dde-4331-842d-824d719b4255-kube-api-access\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.549751 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-var-lock\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.549772 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-kubelet-dir\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.549839 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-var-lock\") pod \"installer-9-crc\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.568969 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f754cdc-1dde-4331-842d-824d719b4255-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"6f754cdc-1dde-4331-842d-824d719b4255\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:44 crc kubenswrapper[4848]: I0128 12:49:44.635461 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:49:49 crc kubenswrapper[4848]: E0128 12:49:49.883734 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-b7gdr" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" Jan 28 12:49:49 crc kubenswrapper[4848]: E0128 12:49:49.967623 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 12:49:49 crc kubenswrapper[4848]: E0128 12:49:49.967823 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5fnch,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-4zt82_openshift-marketplace(ff062c59-745a-4664-b98f-f2fb669edf1f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:49 crc kubenswrapper[4848]: E0128 12:49:49.969036 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-4zt82" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.170265 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-4zt82" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.269168 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.269723 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x775h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-d8bpv_openshift-marketplace(4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.270903 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-d8bpv" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.276142 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.276276 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f998s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-27vnd_openshift-marketplace(5f838c7f-4c63-4856-8991-ae814400975f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:51 crc kubenswrapper[4848]: E0128 12:49:51.277581 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-27vnd" podUID="5f838c7f-4c63-4856-8991-ae814400975f" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.640087 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-27vnd" podUID="5f838c7f-4c63-4856-8991-ae814400975f" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.640190 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-d8bpv" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" Jan 28 12:49:52 crc kubenswrapper[4848]: I0128 12:49:52.654798 4848 scope.go:117] "RemoveContainer" containerID="2b7d9c8e4c967e146ab89a27478347f766ededcf1487f8d688ea98a2fce0fa28" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.723762 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.723913 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tqvdc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-z884b_openshift-marketplace(af50828a-cf61-481c-98c3-fb3e7d8de01a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.725096 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-z884b" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.734539 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.734711 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kdnrr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-c6gkd_openshift-marketplace(26dc23f3-cba2-4cb1-9cf6-7402896c876d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.736651 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-c6gkd" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.844001 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.844499 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w28lg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-np68v_openshift-marketplace(07ba7451-b14b-4eaa-9ed3-6fca9ab7d256): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:49:52 crc kubenswrapper[4848]: E0128 12:49:52.845731 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-np68v" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" Jan 28 12:49:52 crc kubenswrapper[4848]: I0128 12:49:52.932594 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz"] Jan 28 12:49:52 crc kubenswrapper[4848]: W0128 12:49:52.933777 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod921d3a1d_6fb7_4421_9f1f_fba49ff10219.slice/crio-25d306afc08c006174636b073b71ba141b917d9498da4ab6aae3e0101e00cf3a WatchSource:0}: Error finding container 25d306afc08c006174636b073b71ba141b917d9498da4ab6aae3e0101e00cf3a: Status 404 returned error can't find the container with id 25d306afc08c006174636b073b71ba141b917d9498da4ab6aae3e0101e00cf3a Jan 28 12:49:52 crc kubenswrapper[4848]: I0128 12:49:52.939426 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74569667d8-tzxgx"] Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.004966 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 12:49:53 crc kubenswrapper[4848]: W0128 12:49:53.026791 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod8ec2e3f4_25e6_4207_a91c_d445becad8ff.slice/crio-513794bbea14497ff8256b93e42f4d755c3bb20bf1e12133225e6556bacad4e5 WatchSource:0}: Error finding container 513794bbea14497ff8256b93e42f4d755c3bb20bf1e12133225e6556bacad4e5: Status 404 returned error can't find the container with id 
513794bbea14497ff8256b93e42f4d755c3bb20bf1e12133225e6556bacad4e5 Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.184526 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 12:49:53 crc kubenswrapper[4848]: W0128 12:49:53.210342 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6f754cdc_1dde_4331_842d_824d719b4255.slice/crio-2ba495056b289ee21088c426f7e3d697409a96c521e01f3e4f00049d4c113fcb WatchSource:0}: Error finding container 2ba495056b289ee21088c426f7e3d697409a96c521e01f3e4f00049d4c113fcb: Status 404 returned error can't find the container with id 2ba495056b289ee21088c426f7e3d697409a96c521e01f3e4f00049d4c113fcb Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.663954 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" event={"ID":"cfeeb942-43cd-41c4-bc67-df76b9b38b0e","Type":"ContainerStarted","Data":"37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.664331 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" event={"ID":"cfeeb942-43cd-41c4-bc67-df76b9b38b0e","Type":"ContainerStarted","Data":"f90a722abd96342d337b7d904e6bcf00bbf5c80d508b97d28a7561ae8718ad6e"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.664359 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.664012 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" podUID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" containerName="controller-manager" containerID="cri-o://37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481" gracePeriod=30 Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.667151 4848 generic.go:334] "Generic (PLEG): container finished" podID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerID="504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49" exitCode=0 Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.667287 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlkg" event={"ID":"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4","Type":"ContainerDied","Data":"504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.669570 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8ec2e3f4-25e6-4207-a91c-d445becad8ff","Type":"ContainerStarted","Data":"52e3ce49b832f81c89594cd46324d6da0ec2c38d73b28495c45d9c04dce59cdc"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.669770 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8ec2e3f4-25e6-4207-a91c-d445becad8ff","Type":"ContainerStarted","Data":"513794bbea14497ff8256b93e42f4d755c3bb20bf1e12133225e6556bacad4e5"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.674644 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"6f754cdc-1dde-4331-842d-824d719b4255","Type":"ContainerStarted","Data":"ea3c6d016f6957e97e24f4cac3d755d032f6fdf69d51bef447b6c5b4187a085d"} Jan 
28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.674712 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"6f754cdc-1dde-4331-842d-824d719b4255","Type":"ContainerStarted","Data":"2ba495056b289ee21088c426f7e3d697409a96c521e01f3e4f00049d4c113fcb"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.685538 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" event={"ID":"921d3a1d-6fb7-4421-9f1f-fba49ff10219","Type":"ContainerStarted","Data":"af4268c489b76b5cdb3611e17c5c6b9c580cc5559c9bdb7926e364f126bb4fea"} Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.685595 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" event={"ID":"921d3a1d-6fb7-4421-9f1f-fba49ff10219","Type":"ContainerStarted","Data":"25d306afc08c006174636b073b71ba141b917d9498da4ab6aae3e0101e00cf3a"} Jan 28 12:49:53 crc kubenswrapper[4848]: E0128 12:49:53.687475 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-c6gkd" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" Jan 28 12:49:53 crc kubenswrapper[4848]: E0128 12:49:53.687492 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-np68v" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" Jan 28 12:49:53 crc kubenswrapper[4848]: E0128 12:49:53.689074 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-z884b" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.689312 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" podStartSLOduration=39.689291885 podStartE2EDuration="39.689291885s" podCreationTimestamp="2026-01-28 12:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:53.685873525 +0000 UTC m=+220.598090583" watchObservedRunningTime="2026-01-28 12:49:53.689291885 +0000 UTC m=+220.601508923" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.701127 4848 patch_prober.go:28] interesting pod/controller-manager-74569667d8-tzxgx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.55:8443/healthz\": EOF" start-of-body= Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.701282 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" podUID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.55:8443/healthz\": EOF" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.727859 4848 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=14.727835319 podStartE2EDuration="14.727835319s" podCreationTimestamp="2026-01-28 12:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:53.725551182 +0000 UTC m=+220.637768230" watchObservedRunningTime="2026-01-28 12:49:53.727835319 +0000 UTC m=+220.640052357" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.748154 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=9.748123071 podStartE2EDuration="9.748123071s" podCreationTimestamp="2026-01-28 12:49:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:53.7432942 +0000 UTC m=+220.655511258" watchObservedRunningTime="2026-01-28 12:49:53.748123071 +0000 UTC m=+220.660340109" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.803597 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" podStartSLOduration=19.803576218 podStartE2EDuration="19.803576218s" podCreationTimestamp="2026-01-28 12:49:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:53.785966025 +0000 UTC m=+220.698183063" watchObservedRunningTime="2026-01-28 12:49:53.803576218 +0000 UTC m=+220.715793256" Jan 28 12:49:53 crc kubenswrapper[4848]: I0128 12:49:53.998665 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.027044 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-747759fcdd-s8xf7"] Jan 28 12:49:54 crc kubenswrapper[4848]: E0128 12:49:54.027486 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" containerName="controller-manager" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.027702 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" containerName="controller-manager" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.027911 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" containerName="controller-manager" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.028363 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.038812 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-747759fcdd-s8xf7"] Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091522 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-serving-cert\") pod \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091590 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-proxy-ca-bundles\") pod \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091623 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqj6b\" (UniqueName: \"kubernetes.io/projected/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-kube-api-access-mqj6b\") pod \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091662 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-client-ca\") pod \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091687 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-config\") pod \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\" (UID: \"cfeeb942-43cd-41c4-bc67-df76b9b38b0e\") " Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091807 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbhsn\" (UniqueName: \"kubernetes.io/projected/5eeba233-1e73-4c57-b3a5-03f9552cfc14-kube-api-access-sbhsn\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091840 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-proxy-ca-bundles\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091877 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-client-ca\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091905 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/5eeba233-1e73-4c57-b3a5-03f9552cfc14-serving-cert\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.091990 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-config\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.092591 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "cfeeb942-43cd-41c4-bc67-df76b9b38b0e" (UID: "cfeeb942-43cd-41c4-bc67-df76b9b38b0e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.093389 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-client-ca" (OuterVolumeSpecName: "client-ca") pod "cfeeb942-43cd-41c4-bc67-df76b9b38b0e" (UID: "cfeeb942-43cd-41c4-bc67-df76b9b38b0e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.093962 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-config" (OuterVolumeSpecName: "config") pod "cfeeb942-43cd-41c4-bc67-df76b9b38b0e" (UID: "cfeeb942-43cd-41c4-bc67-df76b9b38b0e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.097068 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "cfeeb942-43cd-41c4-bc67-df76b9b38b0e" (UID: "cfeeb942-43cd-41c4-bc67-df76b9b38b0e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.097145 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-kube-api-access-mqj6b" (OuterVolumeSpecName: "kube-api-access-mqj6b") pod "cfeeb942-43cd-41c4-bc67-df76b9b38b0e" (UID: "cfeeb942-43cd-41c4-bc67-df76b9b38b0e"). InnerVolumeSpecName "kube-api-access-mqj6b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.193101 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-config\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.193459 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbhsn\" (UniqueName: \"kubernetes.io/projected/5eeba233-1e73-4c57-b3a5-03f9552cfc14-kube-api-access-sbhsn\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.193581 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-proxy-ca-bundles\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.193707 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-client-ca\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.193816 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eeba233-1e73-4c57-b3a5-03f9552cfc14-serving-cert\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.193957 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194041 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqj6b\" (UniqueName: \"kubernetes.io/projected/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-kube-api-access-mqj6b\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194126 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194208 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194321 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfeeb942-43cd-41c4-bc67-df76b9b38b0e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194662 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-config\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194686 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-client-ca\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.194790 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-proxy-ca-bundles\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.197213 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eeba233-1e73-4c57-b3a5-03f9552cfc14-serving-cert\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.210868 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbhsn\" (UniqueName: \"kubernetes.io/projected/5eeba233-1e73-4c57-b3a5-03f9552cfc14-kube-api-access-sbhsn\") pod \"controller-manager-747759fcdd-s8xf7\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.364008 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.596551 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-747759fcdd-s8xf7"] Jan 28 12:49:54 crc kubenswrapper[4848]: W0128 12:49:54.606548 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5eeba233_1e73_4c57_b3a5_03f9552cfc14.slice/crio-c355d83c17028d6c027960b1e875a31b353c9ff2517f952e3f62074a25126ad7 WatchSource:0}: Error finding container c355d83c17028d6c027960b1e875a31b353c9ff2517f952e3f62074a25126ad7: Status 404 returned error can't find the container with id c355d83c17028d6c027960b1e875a31b353c9ff2517f952e3f62074a25126ad7 Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.696177 4848 generic.go:334] "Generic (PLEG): container finished" podID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" containerID="37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481" exitCode=0 Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.696341 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.696366 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" event={"ID":"cfeeb942-43cd-41c4-bc67-df76b9b38b0e","Type":"ContainerDied","Data":"37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481"} Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.697669 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74569667d8-tzxgx" event={"ID":"cfeeb942-43cd-41c4-bc67-df76b9b38b0e","Type":"ContainerDied","Data":"f90a722abd96342d337b7d904e6bcf00bbf5c80d508b97d28a7561ae8718ad6e"} Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.697705 4848 scope.go:117] "RemoveContainer" containerID="37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.707942 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlkg" event={"ID":"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4","Type":"ContainerStarted","Data":"4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac"} Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.709189 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" event={"ID":"5eeba233-1e73-4c57-b3a5-03f9552cfc14","Type":"ContainerStarted","Data":"c355d83c17028d6c027960b1e875a31b353c9ff2517f952e3f62074a25126ad7"} Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.711760 4848 generic.go:334] "Generic (PLEG): container finished" podID="8ec2e3f4-25e6-4207-a91c-d445becad8ff" containerID="52e3ce49b832f81c89594cd46324d6da0ec2c38d73b28495c45d9c04dce59cdc" exitCode=0 Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.711877 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8ec2e3f4-25e6-4207-a91c-d445becad8ff","Type":"ContainerDied","Data":"52e3ce49b832f81c89594cd46324d6da0ec2c38d73b28495c45d9c04dce59cdc"} Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.712785 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.720655 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.725128 4848 scope.go:117] "RemoveContainer" containerID="37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481" Jan 28 12:49:54 crc kubenswrapper[4848]: E0128 12:49:54.726099 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481\": container with ID starting with 37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481 not found: ID does not exist" containerID="37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.726130 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481"} err="failed to get container status 
\"37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481\": rpc error: code = NotFound desc = could not find container \"37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481\": container with ID starting with 37b3a7c4609b12e473d82916e49a7e6ed3eec040af9d3f55781641ae9a3dc481 not found: ID does not exist" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.727920 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-knlkg" podStartSLOduration=3.76087295 podStartE2EDuration="50.727911034s" podCreationTimestamp="2026-01-28 12:49:04 +0000 UTC" firstStartedPulling="2026-01-28 12:49:07.298141766 +0000 UTC m=+174.210358804" lastFinishedPulling="2026-01-28 12:49:54.26517985 +0000 UTC m=+221.177396888" observedRunningTime="2026-01-28 12:49:54.724817924 +0000 UTC m=+221.637034962" watchObservedRunningTime="2026-01-28 12:49:54.727911034 +0000 UTC m=+221.640128072" Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.775708 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74569667d8-tzxgx"] Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.781366 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-74569667d8-tzxgx"] Jan 28 12:49:54 crc kubenswrapper[4848]: I0128 12:49:54.859007 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfeeb942-43cd-41c4-bc67-df76b9b38b0e" path="/var/lib/kubelet/pods/cfeeb942-43cd-41c4-bc67-df76b9b38b0e/volumes" Jan 28 12:49:55 crc kubenswrapper[4848]: I0128 12:49:55.363844 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:55 crc kubenswrapper[4848]: I0128 12:49:55.363898 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:49:55 crc kubenswrapper[4848]: I0128 12:49:55.720910 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" event={"ID":"5eeba233-1e73-4c57-b3a5-03f9552cfc14","Type":"ContainerStarted","Data":"6ad3da4e9c8cf222de81185d9142973063366fc38b53c117da4bc9b879fb7626"} Jan 28 12:49:55 crc kubenswrapper[4848]: I0128 12:49:55.721398 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:55 crc kubenswrapper[4848]: I0128 12:49:55.728019 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:49:55 crc kubenswrapper[4848]: I0128 12:49:55.747310 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" podStartSLOduration=21.747287661 podStartE2EDuration="21.747287661s" podCreationTimestamp="2026-01-28 12:49:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:55.741828232 +0000 UTC m=+222.654045270" watchObservedRunningTime="2026-01-28 12:49:55.747287661 +0000 UTC m=+222.659504709" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.027829 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.219534 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kube-api-access\") pod \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.220614 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kubelet-dir\") pod \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\" (UID: \"8ec2e3f4-25e6-4207-a91c-d445becad8ff\") " Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.220742 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "8ec2e3f4-25e6-4207-a91c-d445becad8ff" (UID: "8ec2e3f4-25e6-4207-a91c-d445becad8ff"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.221089 4848 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.227363 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "8ec2e3f4-25e6-4207-a91c-d445becad8ff" (UID: "8ec2e3f4-25e6-4207-a91c-d445becad8ff"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.322739 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ec2e3f4-25e6-4207-a91c-d445becad8ff-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.624198 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-knlkg" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="registry-server" probeResult="failure" output=< Jan 28 12:49:56 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 12:49:56 crc kubenswrapper[4848]: > Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.727700 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8ec2e3f4-25e6-4207-a91c-d445becad8ff","Type":"ContainerDied","Data":"513794bbea14497ff8256b93e42f4d755c3bb20bf1e12133225e6556bacad4e5"} Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.727747 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="513794bbea14497ff8256b93e42f4d755c3bb20bf1e12133225e6556bacad4e5" Jan 28 12:49:56 crc kubenswrapper[4848]: I0128 12:49:56.728896 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:50:01 crc kubenswrapper[4848]: I0128 12:50:01.209476 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qrnzf"] Jan 28 12:50:04 crc kubenswrapper[4848]: I0128 12:50:04.783900 4848 generic.go:334] "Generic (PLEG): container finished" podID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerID="9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16" exitCode=0 Jan 28 12:50:04 crc kubenswrapper[4848]: I0128 12:50:04.783982 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerDied","Data":"9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16"} Jan 28 12:50:05 crc kubenswrapper[4848]: I0128 12:50:05.424553 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:50:05 crc kubenswrapper[4848]: I0128 12:50:05.476189 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:50:05 crc kubenswrapper[4848]: I0128 12:50:05.793771 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerStarted","Data":"07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64"} Jan 28 12:50:05 crc kubenswrapper[4848]: I0128 12:50:05.811932 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b7gdr" podStartSLOduration=2.7390225729999997 podStartE2EDuration="1m1.81190415s" podCreationTimestamp="2026-01-28 12:49:04 +0000 UTC" firstStartedPulling="2026-01-28 12:49:06.208307346 +0000 UTC m=+173.120524384" lastFinishedPulling="2026-01-28 12:50:05.281188923 +0000 UTC m=+232.193405961" observedRunningTime="2026-01-28 12:50:05.81017209 +0000 UTC m=+232.722389128" watchObservedRunningTime="2026-01-28 12:50:05.81190415 +0000 UTC m=+232.724121188" Jan 28 12:50:06 crc kubenswrapper[4848]: I0128 12:50:06.006022 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlkg"] Jan 28 12:50:06 crc kubenswrapper[4848]: I0128 12:50:06.802313 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerStarted","Data":"c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e"} Jan 28 12:50:06 crc kubenswrapper[4848]: I0128 12:50:06.802596 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-knlkg" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="registry-server" containerID="cri-o://4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac" gracePeriod=2 Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.325081 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.478654 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-utilities\") pod \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.479635 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-catalog-content\") pod \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.479764 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dldp2\" (UniqueName: \"kubernetes.io/projected/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-kube-api-access-dldp2\") pod \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\" (UID: \"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4\") " Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.483207 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-utilities" (OuterVolumeSpecName: "utilities") pod "f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" (UID: "f5b74c31-d1fb-4d97-b018-479e6d6bbdc4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.506560 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-kube-api-access-dldp2" (OuterVolumeSpecName: "kube-api-access-dldp2") pod "f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" (UID: "f5b74c31-d1fb-4d97-b018-479e6d6bbdc4"). InnerVolumeSpecName "kube-api-access-dldp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.520391 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" (UID: "f5b74c31-d1fb-4d97-b018-479e6d6bbdc4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.580809 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.580862 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dldp2\" (UniqueName: \"kubernetes.io/projected/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-kube-api-access-dldp2\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.580877 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.815027 4848 generic.go:334] "Generic (PLEG): container finished" podID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerID="4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac" exitCode=0 Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.815153 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlkg" event={"ID":"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4","Type":"ContainerDied","Data":"4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac"} Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.815202 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlkg" event={"ID":"f5b74c31-d1fb-4d97-b018-479e6d6bbdc4","Type":"ContainerDied","Data":"aa4f3c3fc198112feaf9f6ce3e631845b908b45dc2f793543521e26386dafb63"} Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.815229 4848 scope.go:117] "RemoveContainer" containerID="4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.815454 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlkg" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.830124 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerStarted","Data":"b80c7ccf51b5f1ea0b7c4285465da8c6df7baacd2c0844961b14776d333a9575"} Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.832092 4848 generic.go:334] "Generic (PLEG): container finished" podID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerID="c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e" exitCode=0 Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.832125 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerDied","Data":"c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e"} Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.841160 4848 scope.go:117] "RemoveContainer" containerID="504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.904444 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlkg"] Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.911986 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlkg"] Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.916711 4848 scope.go:117] "RemoveContainer" containerID="dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.924144 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.924237 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.924311 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.927849 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.927953 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f" gracePeriod=600 Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.946797 4848 scope.go:117] 
"RemoveContainer" containerID="4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac" Jan 28 12:50:07 crc kubenswrapper[4848]: E0128 12:50:07.947394 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac\": container with ID starting with 4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac not found: ID does not exist" containerID="4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.947446 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac"} err="failed to get container status \"4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac\": rpc error: code = NotFound desc = could not find container \"4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac\": container with ID starting with 4b2fd9fb8b832c86bca2d6ddd6fb3577a1a56eaaaa50f9dd01215a0d5be893ac not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.947482 4848 scope.go:117] "RemoveContainer" containerID="504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49" Jan 28 12:50:07 crc kubenswrapper[4848]: E0128 12:50:07.947739 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49\": container with ID starting with 504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49 not found: ID does not exist" containerID="504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.947779 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49"} err="failed to get container status \"504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49\": rpc error: code = NotFound desc = could not find container \"504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49\": container with ID starting with 504b7b24817ea18938509f76eb52a17291d5b2d2fd67d2fba177f30359eecd49 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.947800 4848 scope.go:117] "RemoveContainer" containerID="dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29" Jan 28 12:50:07 crc kubenswrapper[4848]: E0128 12:50:07.948463 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29\": container with ID starting with dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29 not found: ID does not exist" containerID="dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29" Jan 28 12:50:07 crc kubenswrapper[4848]: I0128 12:50:07.948805 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29"} err="failed to get container status \"dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29\": rpc error: code = NotFound desc = could not find container \"dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29\": container with ID starting with 
dbdaf4afda1d5ed7efcc69053f3f0f1d8390ac474c674a2546d88ae205d2be29 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.841630 4848 generic.go:334] "Generic (PLEG): container finished" podID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerID="37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.841747 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8bpv" event={"ID":"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99","Type":"ContainerDied","Data":"37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5"} Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.844630 4848 generic.go:334] "Generic (PLEG): container finished" podID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerID="b80c7ccf51b5f1ea0b7c4285465da8c6df7baacd2c0844961b14776d333a9575" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.844670 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerDied","Data":"b80c7ccf51b5f1ea0b7c4285465da8c6df7baacd2c0844961b14776d333a9575"} Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.852808 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.856935 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" path="/var/lib/kubelet/pods/f5b74c31-d1fb-4d97-b018-479e6d6bbdc4/volumes" Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.857520 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f"} Jan 28 12:50:08 crc kubenswrapper[4848]: I0128 12:50:08.857544 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerStarted","Data":"fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6"} Jan 28 12:50:09 crc kubenswrapper[4848]: I0128 12:50:09.867941 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f"} Jan 28 12:50:09 crc kubenswrapper[4848]: I0128 12:50:09.870323 4848 generic.go:334] "Generic (PLEG): container finished" podID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerID="fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6" exitCode=0 Jan 28 12:50:09 crc kubenswrapper[4848]: I0128 12:50:09.870384 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerDied","Data":"fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.886986 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" 
event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerStarted","Data":"20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.891133 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerStarted","Data":"212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.894410 4848 generic.go:334] "Generic (PLEG): container finished" podID="5f838c7f-4c63-4856-8991-ae814400975f" containerID="fecf0306adb9c0f08b77646de96fac9bcff63bc292fb036a57eeb3bbdbe53484" exitCode=0 Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.894494 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27vnd" event={"ID":"5f838c7f-4c63-4856-8991-ae814400975f","Type":"ContainerDied","Data":"fecf0306adb9c0f08b77646de96fac9bcff63bc292fb036a57eeb3bbdbe53484"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.899239 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8bpv" event={"ID":"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99","Type":"ContainerStarted","Data":"6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.902096 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerStarted","Data":"2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.904706 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerStarted","Data":"f47dafb6db689a490b0c948bfc792be57261dfaaf217210662df1278f82a2771"} Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.912653 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z884b" podStartSLOduration=2.95864209 podStartE2EDuration="1m9.912636952s" podCreationTimestamp="2026-01-28 12:49:02 +0000 UTC" firstStartedPulling="2026-01-28 12:49:03.871120045 +0000 UTC m=+170.783337083" lastFinishedPulling="2026-01-28 12:50:10.825114907 +0000 UTC m=+237.737331945" observedRunningTime="2026-01-28 12:50:11.911058576 +0000 UTC m=+238.823275624" watchObservedRunningTime="2026-01-28 12:50:11.912636952 +0000 UTC m=+238.824853980" Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.930799 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d8bpv" podStartSLOduration=2.711411293 podStartE2EDuration="1m9.930783731s" podCreationTimestamp="2026-01-28 12:49:02 +0000 UTC" firstStartedPulling="2026-01-28 12:49:03.828536417 +0000 UTC m=+170.740753455" lastFinishedPulling="2026-01-28 12:50:11.047908855 +0000 UTC m=+237.960125893" observedRunningTime="2026-01-28 12:50:11.927581888 +0000 UTC m=+238.839798926" watchObservedRunningTime="2026-01-28 12:50:11.930783731 +0000 UTC m=+238.843000769" Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.945713 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-np68v" podStartSLOduration=2.934957694 podStartE2EDuration="1m9.945696946s" 
podCreationTimestamp="2026-01-28 12:49:02 +0000 UTC" firstStartedPulling="2026-01-28 12:49:03.84336668 +0000 UTC m=+170.755583718" lastFinishedPulling="2026-01-28 12:50:10.854105932 +0000 UTC m=+237.766322970" observedRunningTime="2026-01-28 12:50:11.945096539 +0000 UTC m=+238.857313577" watchObservedRunningTime="2026-01-28 12:50:11.945696946 +0000 UTC m=+238.857913984" Jan 28 12:50:11 crc kubenswrapper[4848]: I0128 12:50:11.969423 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4zt82" podStartSLOduration=4.264030093 podStartE2EDuration="1m6.969406188s" podCreationTimestamp="2026-01-28 12:49:05 +0000 UTC" firstStartedPulling="2026-01-28 12:49:08.347927987 +0000 UTC m=+175.260145025" lastFinishedPulling="2026-01-28 12:50:11.053304082 +0000 UTC m=+237.965521120" observedRunningTime="2026-01-28 12:50:11.965359439 +0000 UTC m=+238.877576477" watchObservedRunningTime="2026-01-28 12:50:11.969406188 +0000 UTC m=+238.881623226" Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.709146 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z884b" Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.709259 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z884b" Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.765257 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z884b" Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.916667 4848 generic.go:334] "Generic (PLEG): container finished" podID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerID="2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4" exitCode=0 Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.917976 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerDied","Data":"2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4"} Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.925537 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d8bpv" Jan 28 12:50:12 crc kubenswrapper[4848]: I0128 12:50:12.926468 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d8bpv" Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.099337 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-np68v" Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.099389 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-np68v" Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.140724 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-np68v" Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.925623 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27vnd" event={"ID":"5f838c7f-4c63-4856-8991-ae814400975f","Type":"ContainerStarted","Data":"73ccab5db3a43dc6e6c639991247d3cccd6958d8883b1772b747a7a07890d307"} Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.927554 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerStarted","Data":"fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b"} Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.951960 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-27vnd" podStartSLOduration=4.19826826 podStartE2EDuration="1m11.951932313s" podCreationTimestamp="2026-01-28 12:49:02 +0000 UTC" firstStartedPulling="2026-01-28 12:49:05.065623489 +0000 UTC m=+171.977840527" lastFinishedPulling="2026-01-28 12:50:12.819287542 +0000 UTC m=+239.731504580" observedRunningTime="2026-01-28 12:50:13.948208574 +0000 UTC m=+240.860425612" watchObservedRunningTime="2026-01-28 12:50:13.951932313 +0000 UTC m=+240.864149351" Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.967915 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c6gkd" podStartSLOduration=2.530216052 podStartE2EDuration="1m8.967891268s" podCreationTimestamp="2026-01-28 12:49:05 +0000 UTC" firstStartedPulling="2026-01-28 12:49:07.324141492 +0000 UTC m=+174.236358530" lastFinishedPulling="2026-01-28 12:50:13.761816708 +0000 UTC m=+240.674033746" observedRunningTime="2026-01-28 12:50:13.965929361 +0000 UTC m=+240.878146419" watchObservedRunningTime="2026-01-28 12:50:13.967891268 +0000 UTC m=+240.880108306" Jan 28 12:50:13 crc kubenswrapper[4848]: I0128 12:50:13.974896 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-d8bpv" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="registry-server" probeResult="failure" output=< Jan 28 12:50:13 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 12:50:13 crc kubenswrapper[4848]: > Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.496022 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-747759fcdd-s8xf7"] Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.496432 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" podUID="5eeba233-1e73-4c57-b3a5-03f9552cfc14" containerName="controller-manager" containerID="cri-o://6ad3da4e9c8cf222de81185d9142973063366fc38b53c117da4bc9b879fb7626" gracePeriod=30 Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.591758 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz"] Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.592548 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" podUID="921d3a1d-6fb7-4421-9f1f-fba49ff10219" containerName="route-controller-manager" containerID="cri-o://af4268c489b76b5cdb3611e17c5c6b9c580cc5559c9bdb7926e364f126bb4fea" gracePeriod=30 Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.938581 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.939417 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.949306 4848 generic.go:334] "Generic 
(PLEG): container finished" podID="5eeba233-1e73-4c57-b3a5-03f9552cfc14" containerID="6ad3da4e9c8cf222de81185d9142973063366fc38b53c117da4bc9b879fb7626" exitCode=0 Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.949419 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" event={"ID":"5eeba233-1e73-4c57-b3a5-03f9552cfc14","Type":"ContainerDied","Data":"6ad3da4e9c8cf222de81185d9142973063366fc38b53c117da4bc9b879fb7626"} Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.953126 4848 generic.go:334] "Generic (PLEG): container finished" podID="921d3a1d-6fb7-4421-9f1f-fba49ff10219" containerID="af4268c489b76b5cdb3611e17c5c6b9c580cc5559c9bdb7926e364f126bb4fea" exitCode=0 Jan 28 12:50:14 crc kubenswrapper[4848]: I0128 12:50:14.954520 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" event={"ID":"921d3a1d-6fb7-4421-9f1f-fba49ff10219","Type":"ContainerDied","Data":"af4268c489b76b5cdb3611e17c5c6b9c580cc5559c9bdb7926e364f126bb4fea"} Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.006596 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.156935 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.215432 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-client-ca\") pod \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.215535 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmdzx\" (UniqueName: \"kubernetes.io/projected/921d3a1d-6fb7-4421-9f1f-fba49ff10219-kube-api-access-mmdzx\") pod \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.215576 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-config\") pod \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.215624 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/921d3a1d-6fb7-4421-9f1f-fba49ff10219-serving-cert\") pod \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\" (UID: \"921d3a1d-6fb7-4421-9f1f-fba49ff10219\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.216855 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-config" (OuterVolumeSpecName: "config") pod "921d3a1d-6fb7-4421-9f1f-fba49ff10219" (UID: "921d3a1d-6fb7-4421-9f1f-fba49ff10219"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.217281 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-client-ca" (OuterVolumeSpecName: "client-ca") pod "921d3a1d-6fb7-4421-9f1f-fba49ff10219" (UID: "921d3a1d-6fb7-4421-9f1f-fba49ff10219"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.228536 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/921d3a1d-6fb7-4421-9f1f-fba49ff10219-kube-api-access-mmdzx" (OuterVolumeSpecName: "kube-api-access-mmdzx") pod "921d3a1d-6fb7-4421-9f1f-fba49ff10219" (UID: "921d3a1d-6fb7-4421-9f1f-fba49ff10219"). InnerVolumeSpecName "kube-api-access-mmdzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.234451 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/921d3a1d-6fb7-4421-9f1f-fba49ff10219-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "921d3a1d-6fb7-4421-9f1f-fba49ff10219" (UID: "921d3a1d-6fb7-4421-9f1f-fba49ff10219"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.317291 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmdzx\" (UniqueName: \"kubernetes.io/projected/921d3a1d-6fb7-4421-9f1f-fba49ff10219-kube-api-access-mmdzx\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.317362 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.317378 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/921d3a1d-6fb7-4421-9f1f-fba49ff10219-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.317400 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/921d3a1d-6fb7-4421-9f1f-fba49ff10219-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.636887 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.824493 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-config\") pod \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.824568 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-proxy-ca-bundles\") pod \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.824601 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eeba233-1e73-4c57-b3a5-03f9552cfc14-serving-cert\") pod \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.824678 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-client-ca\") pod \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.824729 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbhsn\" (UniqueName: \"kubernetes.io/projected/5eeba233-1e73-4c57-b3a5-03f9552cfc14-kube-api-access-sbhsn\") pod \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\" (UID: \"5eeba233-1e73-4c57-b3a5-03f9552cfc14\") " Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.825988 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-client-ca" (OuterVolumeSpecName: "client-ca") pod "5eeba233-1e73-4c57-b3a5-03f9552cfc14" (UID: "5eeba233-1e73-4c57-b3a5-03f9552cfc14"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.826063 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5eeba233-1e73-4c57-b3a5-03f9552cfc14" (UID: "5eeba233-1e73-4c57-b3a5-03f9552cfc14"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.826549 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-config" (OuterVolumeSpecName: "config") pod "5eeba233-1e73-4c57-b3a5-03f9552cfc14" (UID: "5eeba233-1e73-4c57-b3a5-03f9552cfc14"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.829626 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eeba233-1e73-4c57-b3a5-03f9552cfc14-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5eeba233-1e73-4c57-b3a5-03f9552cfc14" (UID: "5eeba233-1e73-4c57-b3a5-03f9552cfc14"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.829644 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eeba233-1e73-4c57-b3a5-03f9552cfc14-kube-api-access-sbhsn" (OuterVolumeSpecName: "kube-api-access-sbhsn") pod "5eeba233-1e73-4c57-b3a5-03f9552cfc14" (UID: "5eeba233-1e73-4c57-b3a5-03f9552cfc14"). InnerVolumeSpecName "kube-api-access-sbhsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.927088 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.927153 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.927169 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eeba233-1e73-4c57-b3a5-03f9552cfc14-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.927182 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5eeba233-1e73-4c57-b3a5-03f9552cfc14-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.927198 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbhsn\" (UniqueName: \"kubernetes.io/projected/5eeba233-1e73-4c57-b3a5-03f9552cfc14-kube-api-access-sbhsn\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.944186 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.944339 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.962578 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" event={"ID":"921d3a1d-6fb7-4421-9f1f-fba49ff10219","Type":"ContainerDied","Data":"25d306afc08c006174636b073b71ba141b917d9498da4ab6aae3e0101e00cf3a"} Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.962660 4848 scope.go:117] "RemoveContainer" containerID="af4268c489b76b5cdb3611e17c5c6b9c580cc5559c9bdb7926e364f126bb4fea" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.962858 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.970426 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.972422 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-747759fcdd-s8xf7" event={"ID":"5eeba233-1e73-4c57-b3a5-03f9552cfc14","Type":"ContainerDied","Data":"c355d83c17028d6c027960b1e875a31b353c9ff2517f952e3f62074a25126ad7"} Jan 28 12:50:15 crc kubenswrapper[4848]: I0128 12:50:15.995602 4848 scope.go:117] "RemoveContainer" containerID="6ad3da4e9c8cf222de81185d9142973063366fc38b53c117da4bc9b879fb7626" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.003961 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.006897 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b88d6b4c9-8k5vz"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.025749 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-747759fcdd-s8xf7"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.032070 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-747759fcdd-s8xf7"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.040085 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138145 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-857b45fc74-sfl86"] Jan 28 12:50:16 crc kubenswrapper[4848]: E0128 12:50:16.138494 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eeba233-1e73-4c57-b3a5-03f9552cfc14" containerName="controller-manager" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138511 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eeba233-1e73-4c57-b3a5-03f9552cfc14" containerName="controller-manager" Jan 28 12:50:16 crc kubenswrapper[4848]: E0128 12:50:16.138522 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="921d3a1d-6fb7-4421-9f1f-fba49ff10219" containerName="route-controller-manager" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138528 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="921d3a1d-6fb7-4421-9f1f-fba49ff10219" containerName="route-controller-manager" Jan 28 12:50:16 crc kubenswrapper[4848]: E0128 12:50:16.138541 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec2e3f4-25e6-4207-a91c-d445becad8ff" containerName="pruner" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138547 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec2e3f4-25e6-4207-a91c-d445becad8ff" containerName="pruner" Jan 28 12:50:16 crc kubenswrapper[4848]: E0128 12:50:16.138557 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="registry-server" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138563 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="registry-server" Jan 28 12:50:16 crc kubenswrapper[4848]: E0128 12:50:16.138572 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" 
containerName="extract-utilities" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138578 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="extract-utilities" Jan 28 12:50:16 crc kubenswrapper[4848]: E0128 12:50:16.138596 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="extract-content" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138603 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="extract-content" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138732 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="921d3a1d-6fb7-4421-9f1f-fba49ff10219" containerName="route-controller-manager" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138751 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec2e3f4-25e6-4207-a91c-d445becad8ff" containerName="pruner" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138761 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eeba233-1e73-4c57-b3a5-03f9552cfc14" containerName="controller-manager" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.138773 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5b74c31-d1fb-4d97-b018-479e6d6bbdc4" containerName="registry-server" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.139365 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.144357 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.144728 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.144937 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.145074 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.146198 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.146392 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.155898 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.157605 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.158747 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.160917 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.163652 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.164428 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.164576 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.165050 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.166390 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.172316 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-857b45fc74-sfl86"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.178578 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"] Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233172 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-client-ca\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233239 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1f4e51e-7e30-400d-a472-c398091b68d6-serving-cert\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233299 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh2dp\" (UniqueName: \"kubernetes.io/projected/f1f4e51e-7e30-400d-a472-c398091b68d6-kube-api-access-bh2dp\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233333 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-client-ca\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233356 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-ztjw5\" (UniqueName: \"kubernetes.io/projected/0f895523-c590-4a7e-8070-7434eac8246e-kube-api-access-ztjw5\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233381 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-proxy-ca-bundles\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233402 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f895523-c590-4a7e-8070-7434eac8246e-serving-cert\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233493 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-config\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.233601 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-config\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334643 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh2dp\" (UniqueName: \"kubernetes.io/projected/f1f4e51e-7e30-400d-a472-c398091b68d6-kube-api-access-bh2dp\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334710 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-client-ca\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334740 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztjw5\" (UniqueName: \"kubernetes.io/projected/0f895523-c590-4a7e-8070-7434eac8246e-kube-api-access-ztjw5\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334769 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-proxy-ca-bundles\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334796 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f895523-c590-4a7e-8070-7434eac8246e-serving-cert\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334815 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-config\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334834 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-config\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334870 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-client-ca\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.334903 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1f4e51e-7e30-400d-a472-c398091b68d6-serving-cert\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.336584 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-client-ca\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.336633 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-proxy-ca-bundles\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.336678 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-client-ca\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " 
pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.336814 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-config\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.343589 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-config\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.346377 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f895523-c590-4a7e-8070-7434eac8246e-serving-cert\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.355908 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh2dp\" (UniqueName: \"kubernetes.io/projected/f1f4e51e-7e30-400d-a472-c398091b68d6-kube-api-access-bh2dp\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.358020 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1f4e51e-7e30-400d-a472-c398091b68d6-serving-cert\") pod \"controller-manager-857b45fc74-sfl86\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.360998 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztjw5\" (UniqueName: \"kubernetes.io/projected/0f895523-c590-4a7e-8070-7434eac8246e-kube-api-access-ztjw5\") pod \"route-controller-manager-fb5964489-2598k\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.469539 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.480228 4848 util.go:30] "No sandbox for pod can be found. 
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.528604 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4zt82"
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.528671 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4zt82"
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.821163 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-857b45fc74-sfl86"]
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.862856 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5eeba233-1e73-4c57-b3a5-03f9552cfc14" path="/var/lib/kubelet/pods/5eeba233-1e73-4c57-b3a5-03f9552cfc14/volumes"
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.864005 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="921d3a1d-6fb7-4421-9f1f-fba49ff10219" path="/var/lib/kubelet/pods/921d3a1d-6fb7-4421-9f1f-fba49ff10219/volumes"
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.948381 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"]
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.980815 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" event={"ID":"0f895523-c590-4a7e-8070-7434eac8246e","Type":"ContainerStarted","Data":"e038dfc1d0f7561a2add9345d26ead8e98b0a757adeec4ea224140b5b146cd2f"}
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.984348 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" event={"ID":"f1f4e51e-7e30-400d-a472-c398091b68d6","Type":"ContainerStarted","Data":"9f16d55e3b06b71fddd8051c712ac2bd3d2da9d7b8c1b2cd504080dfa8aac92d"}
Jan 28 12:50:16 crc kubenswrapper[4848]: I0128 12:50:16.996306 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6gkd" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="registry-server" probeResult="failure" output=<
Jan 28 12:50:16 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s
Jan 28 12:50:16 crc kubenswrapper[4848]: >
Jan 28 12:50:17 crc kubenswrapper[4848]: I0128 12:50:17.577225 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zt82" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="registry-server" probeResult="failure" output=<
Jan 28 12:50:17 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s
Jan 28 12:50:17 crc kubenswrapper[4848]: >
Jan 28 12:50:17 crc kubenswrapper[4848]: I0128 12:50:17.992775 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" event={"ID":"f1f4e51e-7e30-400d-a472-c398091b68d6","Type":"ContainerStarted","Data":"2c30db9a8880dc77c5a2ebb9c106d407c59c1a21e0041989f6aa6a6fd5b4a1f1"}
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.006490 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" event={"ID":"0f895523-c590-4a7e-8070-7434eac8246e","Type":"ContainerStarted","Data":"934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d"}
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.007145 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86"
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.007164 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.013941 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86"
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.014128 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.027353 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" podStartSLOduration=6.027332555 podStartE2EDuration="6.027332555s" podCreationTimestamp="2026-01-28 12:50:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:20.025349097 +0000 UTC m=+246.937566135" watchObservedRunningTime="2026-01-28 12:50:20.027332555 +0000 UTC m=+246.939549593"
Jan 28 12:50:20 crc kubenswrapper[4848]: I0128 12:50:20.048055 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" podStartSLOduration=6.048029878 podStartE2EDuration="6.048029878s" podCreationTimestamp="2026-01-28 12:50:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:20.044422373 +0000 UTC m=+246.956639421" watchObservedRunningTime="2026-01-28 12:50:20.048029878 +0000 UTC m=+246.960246926"
Jan 28 12:50:22 crc kubenswrapper[4848]: I0128 12:50:22.761597 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z884b"
Jan 28 12:50:22 crc kubenswrapper[4848]: I0128 12:50:22.965364 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:50:23 crc kubenswrapper[4848]: I0128 12:50:23.020447 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d8bpv"
Jan 28 12:50:23 crc kubenswrapper[4848]: I0128 12:50:23.141157 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-np68v"
Jan 28 12:50:23 crc kubenswrapper[4848]: I0128 12:50:23.315920 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:50:23 crc kubenswrapper[4848]: I0128 12:50:23.315990 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:50:23 crc kubenswrapper[4848]: I0128 12:50:23.365493 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:50:24 crc kubenswrapper[4848]: I0128 12:50:24.076558 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:50:24 crc kubenswrapper[4848]: I0128 12:50:24.802576 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-27vnd"]
Jan 28 12:50:25 crc kubenswrapper[4848]: I0128 12:50:25.396861 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-np68v"]
Jan 28 12:50:25 crc kubenswrapper[4848]: I0128 12:50:25.397127 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-np68v" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="registry-server" containerID="cri-o://f47dafb6db689a490b0c948bfc792be57261dfaaf217210662df1278f82a2771" gracePeriod=2
Jan 28 12:50:25 crc kubenswrapper[4848]: I0128 12:50:25.987516 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c6gkd"
Jan 28 12:50:26 crc kubenswrapper[4848]: I0128 12:50:26.039467 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c6gkd"
Jan 28 12:50:26 crc kubenswrapper[4848]: I0128 12:50:26.045232 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-27vnd" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="registry-server" containerID="cri-o://73ccab5db3a43dc6e6c639991247d3cccd6958d8883b1772b747a7a07890d307" gracePeriod=2
Jan 28 12:50:26 crc kubenswrapper[4848]: I0128 12:50:26.244535 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" podUID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" containerName="oauth-openshift" containerID="cri-o://3c28d6357f5c2b631cfb923e595df612a6eeb79f25cbc7ee163cdbbf4943ac2b" gracePeriod=15
Jan 28 12:50:26 crc kubenswrapper[4848]: I0128 12:50:26.580445 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4zt82"
Jan 28 12:50:26 crc kubenswrapper[4848]: I0128 12:50:26.626277 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4zt82"
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.055396 4848 generic.go:334] "Generic (PLEG): container finished" podID="5f838c7f-4c63-4856-8991-ae814400975f" containerID="73ccab5db3a43dc6e6c639991247d3cccd6958d8883b1772b747a7a07890d307" exitCode=0
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.055476 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27vnd" event={"ID":"5f838c7f-4c63-4856-8991-ae814400975f","Type":"ContainerDied","Data":"73ccab5db3a43dc6e6c639991247d3cccd6958d8883b1772b747a7a07890d307"}
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.057709 4848 generic.go:334] "Generic (PLEG): container finished" podID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerID="f47dafb6db689a490b0c948bfc792be57261dfaaf217210662df1278f82a2771" exitCode=0
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.057787 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerDied","Data":"f47dafb6db689a490b0c948bfc792be57261dfaaf217210662df1278f82a2771"}
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.059430 4848 generic.go:334] "Generic (PLEG): container finished" podID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" containerID="3c28d6357f5c2b631cfb923e595df612a6eeb79f25cbc7ee163cdbbf4943ac2b" exitCode=0
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.059505 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" event={"ID":"61c3ecdb-58a8-4558-a43c-81cd7e8b4132","Type":"ContainerDied","Data":"3c28d6357f5c2b631cfb923e595df612a6eeb79f25cbc7ee163cdbbf4943ac2b"}
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.915720 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf"
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.987727 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-dir\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") "
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.987825 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-serving-cert\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") "
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.987874 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-idp-0-file-data\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") "
Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.987872 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.987903 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-login\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988042 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-policies\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988111 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-router-certs\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988152 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-service-ca\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988196 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-cliconfig\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988276 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cjdm\" (UniqueName: \"kubernetes.io/projected/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-kube-api-access-8cjdm\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988302 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-provider-selection\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988357 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-error\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988394 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-trusted-ca-bundle\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 
12:50:27.988425 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-ocp-branding-template\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.988464 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-session\") pod \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\" (UID: \"61c3ecdb-58a8-4558-a43c-81cd7e8b4132\") " Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.989072 4848 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:27 crc kubenswrapper[4848]: I0128 12:50:27.997504 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.002806 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.002868 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.003422 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.006606 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.007026 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.023532 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.023989 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.025238 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.026096 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.026756 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.034944 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-kube-api-access-8cjdm" (OuterVolumeSpecName: "kube-api-access-8cjdm") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "kube-api-access-8cjdm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.034982 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "61c3ecdb-58a8-4558-a43c-81cd7e8b4132" (UID: "61c3ecdb-58a8-4558-a43c-81cd7e8b4132"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.083038 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" event={"ID":"61c3ecdb-58a8-4558-a43c-81cd7e8b4132","Type":"ContainerDied","Data":"7080c909ed55f88bc4fef0520527fc3c0909c6b1c5281ff5b0cff4557e2edb33"} Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.083120 4848 scope.go:117] "RemoveContainer" containerID="3c28d6357f5c2b631cfb923e595df612a6eeb79f25cbc7ee163cdbbf4943ac2b" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.083114 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qrnzf" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090049 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090095 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090113 4848 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090132 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090146 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090159 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090172 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cjdm\" (UniqueName: \"kubernetes.io/projected/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-kube-api-access-8cjdm\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090188 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090204 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090218 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090230 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090257 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.090288 4848 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61c3ecdb-58a8-4558-a43c-81cd7e8b4132-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.113619 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-np68v" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.128352 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qrnzf"] Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.130681 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qrnzf"] Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167037 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-574b75df8-z2728"] Jan 28 12:50:28 crc kubenswrapper[4848]: E0128 12:50:28.167424 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="registry-server" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167440 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="registry-server" Jan 28 12:50:28 crc kubenswrapper[4848]: E0128 12:50:28.167462 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" containerName="oauth-openshift" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167468 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" containerName="oauth-openshift" Jan 28 12:50:28 crc kubenswrapper[4848]: E0128 12:50:28.167478 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="extract-utilities" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167484 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="extract-utilities" Jan 28 12:50:28 crc kubenswrapper[4848]: E0128 12:50:28.167492 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="extract-content" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167498 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="extract-content" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167625 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" containerName="registry-server" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.167643 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" containerName="oauth-openshift" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.168120 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.170572 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.171615 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.171802 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.172011 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.172770 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.172869 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.173171 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.173406 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.173518 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.173766 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.174020 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.174218 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.179712 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-574b75df8-z2728"] Jan 28 12:50:28 crc 
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.184654 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-27vnd"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.190706 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w28lg\" (UniqueName: \"kubernetes.io/projected/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-kube-api-access-w28lg\") pod \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") "
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.190828 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-catalog-content\") pod \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") "
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.190713 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191063 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-utilities\") pod \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\" (UID: \"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256\") "
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191288 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-audit-policies\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191330 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-service-ca\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191359 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/32f32501-893c-4f73-ad89-3c4f37c7a422-audit-dir\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191407 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191440 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191467 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191492 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zvzn\" (UniqueName: \"kubernetes.io/projected/32f32501-893c-4f73-ad89-3c4f37c7a422-kube-api-access-6zvzn\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191522 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-serving-cert\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.191584 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-session\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.193086 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-router-certs\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.193128 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.193593 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-login\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728"
pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.193650 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-error\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.193674 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-cliconfig\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.196039 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.199311 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-utilities" (OuterVolumeSpecName: "utilities") pod "07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" (UID: "07ba7451-b14b-4eaa-9ed3-6fca9ab7d256"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.199579 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-kube-api-access-w28lg" (OuterVolumeSpecName: "kube-api-access-w28lg") pod "07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" (UID: "07ba7451-b14b-4eaa-9ed3-6fca9ab7d256"). InnerVolumeSpecName "kube-api-access-w28lg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.252603 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" (UID: "07ba7451-b14b-4eaa-9ed3-6fca9ab7d256"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.294537 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f998s\" (UniqueName: \"kubernetes.io/projected/5f838c7f-4c63-4856-8991-ae814400975f-kube-api-access-f998s\") pod \"5f838c7f-4c63-4856-8991-ae814400975f\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.294758 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-utilities\") pod \"5f838c7f-4c63-4856-8991-ae814400975f\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.294848 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-catalog-content\") pod \"5f838c7f-4c63-4856-8991-ae814400975f\" (UID: \"5f838c7f-4c63-4856-8991-ae814400975f\") " Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295059 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295122 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295170 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295196 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zvzn\" (UniqueName: \"kubernetes.io/projected/32f32501-893c-4f73-ad89-3c4f37c7a422-kube-api-access-6zvzn\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295226 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-serving-cert\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295297 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-session\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295318 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-router-certs\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295373 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295403 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-login\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295453 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-error\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295478 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-cliconfig\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295550 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-audit-policies\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295607 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-service-ca\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295634 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/32f32501-893c-4f73-ad89-3c4f37c7a422-audit-dir\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295711 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w28lg\" (UniqueName: \"kubernetes.io/projected/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-kube-api-access-w28lg\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295729 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295764 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.295804 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/32f32501-893c-4f73-ad89-3c4f37c7a422-audit-dir\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.296626 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-utilities" (OuterVolumeSpecName: "utilities") pod "5f838c7f-4c63-4856-8991-ae814400975f" (UID: "5f838c7f-4c63-4856-8991-ae814400975f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.297563 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-audit-policies\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.297722 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-cliconfig\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.297877 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-service-ca\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.299449 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.299126 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f838c7f-4c63-4856-8991-ae814400975f-kube-api-access-f998s" (OuterVolumeSpecName: "kube-api-access-f998s") pod "5f838c7f-4c63-4856-8991-ae814400975f" (UID: "5f838c7f-4c63-4856-8991-ae814400975f"). InnerVolumeSpecName "kube-api-access-f998s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.300155 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.301617 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-serving-cert\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.302509 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-session\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.303280 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-login\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.303403 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.303867 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.304522 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-system-router-certs\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.305231 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/32f32501-893c-4f73-ad89-3c4f37c7a422-v4-0-config-user-template-error\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " 
pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.312577 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zvzn\" (UniqueName: \"kubernetes.io/projected/32f32501-893c-4f73-ad89-3c4f37c7a422-kube-api-access-6zvzn\") pod \"oauth-openshift-574b75df8-z2728\" (UID: \"32f32501-893c-4f73-ad89-3c4f37c7a422\") " pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.349485 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f838c7f-4c63-4856-8991-ae814400975f" (UID: "5f838c7f-4c63-4856-8991-ae814400975f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.397868 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f998s\" (UniqueName: \"kubernetes.io/projected/5f838c7f-4c63-4856-8991-ae814400975f-kube-api-access-f998s\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.397957 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.397971 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f838c7f-4c63-4856-8991-ae814400975f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.504961 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.860396 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61c3ecdb-58a8-4558-a43c-81cd7e8b4132" path="/var/lib/kubelet/pods/61c3ecdb-58a8-4558-a43c-81cd7e8b4132/volumes" Jan 28 12:50:28 crc kubenswrapper[4848]: I0128 12:50:28.929846 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-574b75df8-z2728"] Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.094083 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27vnd" event={"ID":"5f838c7f-4c63-4856-8991-ae814400975f","Type":"ContainerDied","Data":"9ec3cd775da0b76a347c7b91a2211c35dcbc92d80b5b86d3737eec486f370ebf"} Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.094145 4848 scope.go:117] "RemoveContainer" containerID="73ccab5db3a43dc6e6c639991247d3cccd6958d8883b1772b747a7a07890d307" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.094321 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-27vnd" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.097733 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-np68v" event={"ID":"07ba7451-b14b-4eaa-9ed3-6fca9ab7d256","Type":"ContainerDied","Data":"8a2973da92b276012b598e10e698795478e0e49dc9f5bb4eb3ca2359c7e3b906"} Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.097891 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-np68v" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.103200 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" event={"ID":"32f32501-893c-4f73-ad89-3c4f37c7a422","Type":"ContainerStarted","Data":"881b0994e47052a882b3b4d62bf70f3f8c311cffcd04097d855c37e48ccf4836"} Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.124303 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-27vnd"] Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.129179 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-27vnd"] Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.136083 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-np68v"] Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.140414 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-np68v"] Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.164936 4848 scope.go:117] "RemoveContainer" containerID="fecf0306adb9c0f08b77646de96fac9bcff63bc292fb036a57eeb3bbdbe53484" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.183980 4848 scope.go:117] "RemoveContainer" containerID="94746d5da669fa5c01764e32a6f0f3943f912b71c19eb251ad57648bee68a9d5" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.200048 4848 scope.go:117] "RemoveContainer" containerID="f47dafb6db689a490b0c948bfc792be57261dfaaf217210662df1278f82a2771" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.219715 4848 scope.go:117] "RemoveContainer" containerID="b80c7ccf51b5f1ea0b7c4285465da8c6df7baacd2c0844961b14776d333a9575" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.237356 4848 scope.go:117] "RemoveContainer" containerID="29d700b453963d7d4cd7fe6c6e92cb2deef74ac0cd799990c76a2f17846f6443" Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.601814 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4zt82"] Jan 28 12:50:29 crc kubenswrapper[4848]: I0128 12:50:29.602211 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4zt82" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="registry-server" containerID="cri-o://212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a" gracePeriod=2 Jan 28 12:50:30 crc kubenswrapper[4848]: I0128 12:50:30.855744 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07ba7451-b14b-4eaa-9ed3-6fca9ab7d256" path="/var/lib/kubelet/pods/07ba7451-b14b-4eaa-9ed3-6fca9ab7d256/volumes" Jan 28 12:50:30 crc kubenswrapper[4848]: I0128 12:50:30.856811 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f838c7f-4c63-4856-8991-ae814400975f" path="/var/lib/kubelet/pods/5f838c7f-4c63-4856-8991-ae814400975f/volumes" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.068761 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.085048 4848 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.085899 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb" gracePeriod=15 Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.085995 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059" gracePeriod=15 Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.086096 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9" gracePeriod=15 Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.086235 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d" gracePeriod=15 Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.086332 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e" gracePeriod=15 Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.093598 4848 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094193 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094216 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094227 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094236 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094267 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094275 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 
12:50:31.094289 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="registry-server" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094297 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="registry-server" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094308 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094319 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094328 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="extract-utilities" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094337 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="extract-utilities" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094350 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="extract-content" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094360 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="extract-content" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094375 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094383 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094392 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094401 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094411 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="extract-utilities" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094419 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="extract-utilities" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094430 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="extract-content" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094439 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="extract-content" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094453 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="registry-server" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094461 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="registry-server" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094477 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094485 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094601 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094614 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094625 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094644 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094653 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerName="registry-server" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094663 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094673 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094681 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f838c7f-4c63-4856-8991-ae814400975f" containerName="registry-server" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094691 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.094813 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.094823 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.096215 4848 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.096885 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.137443 4848 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.138619 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.138682 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.138969 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.149742 4848 generic.go:334] "Generic (PLEG): container finished" podID="ff062c59-745a-4664-b98f-f2fb669edf1f" containerID="212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a" exitCode=0 Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.149806 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4zt82" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.149828 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerDied","Data":"212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a"} Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.149863 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zt82" event={"ID":"ff062c59-745a-4664-b98f-f2fb669edf1f","Type":"ContainerDied","Data":"f7db86905e411f4142250af532e0914a5a18894d8d3ae56accaf10665b4c1bd7"} Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.149883 4848 scope.go:117] "RemoveContainer" containerID="212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.156192 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" event={"ID":"32f32501-893c-4f73-ad89-3c4f37c7a422","Type":"ContainerStarted","Data":"6d8b892de32b76a6c47133f14e7c2be6714c63390497ebc2abe506b92c069211"} Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.156767 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.168648 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.182474 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.185516 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" podStartSLOduration=30.18549672 podStartE2EDuration="30.18549672s" podCreationTimestamp="2026-01-28 12:50:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:31.177596851 +0000 UTC m=+258.089813899" watchObservedRunningTime="2026-01-28 12:50:31.18549672 +0000 UTC m=+258.097713758" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.200936 4848 scope.go:117] "RemoveContainer" containerID="fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.240599 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fnch\" (UniqueName: \"kubernetes.io/projected/ff062c59-745a-4664-b98f-f2fb669edf1f-kube-api-access-5fnch\") pod \"ff062c59-745a-4664-b98f-f2fb669edf1f\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.240671 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-catalog-content\") pod \"ff062c59-745a-4664-b98f-f2fb669edf1f\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.240702 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-utilities\") pod 
\"ff062c59-745a-4664-b98f-f2fb669edf1f\" (UID: \"ff062c59-745a-4664-b98f-f2fb669edf1f\") " Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.240813 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.240879 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.240971 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.241009 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.241058 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.241083 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.241105 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.241160 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.242539 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod 
\"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.242827 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.244115 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.245789 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-utilities" (OuterVolumeSpecName: "utilities") pod "ff062c59-745a-4664-b98f-f2fb669edf1f" (UID: "ff062c59-745a-4664-b98f-f2fb669edf1f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.249313 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff062c59-745a-4664-b98f-f2fb669edf1f-kube-api-access-5fnch" (OuterVolumeSpecName: "kube-api-access-5fnch") pod "ff062c59-745a-4664-b98f-f2fb669edf1f" (UID: "ff062c59-745a-4664-b98f-f2fb669edf1f"). InnerVolumeSpecName "kube-api-access-5fnch". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.271812 4848 scope.go:117] "RemoveContainer" containerID="910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.292048 4848 scope.go:117] "RemoveContainer" containerID="212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.292645 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a\": container with ID starting with 212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a not found: ID does not exist" containerID="212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.292687 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a"} err="failed to get container status \"212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a\": rpc error: code = NotFound desc = could not find container \"212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a\": container with ID starting with 212e4323862b8b6e0b66f9c3a6a88dd46342c851a1922c16f8200033c8176b9a not found: ID does not exist" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.292715 4848 scope.go:117] "RemoveContainer" containerID="fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.293024 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6\": container with ID starting with fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6 not found: ID does not exist" containerID="fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.293069 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6"} err="failed to get container status \"fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6\": rpc error: code = NotFound desc = could not find container \"fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6\": container with ID starting with fe4c8d395a6e08dbc3ff0a75b84c57c270597def6ffd725e23d7a4f377982bc6 not found: ID does not exist" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.293099 4848 scope.go:117] "RemoveContainer" containerID="910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.293395 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d\": container with ID starting with 910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d not found: ID does not exist" containerID="910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.293419 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d"} err="failed to get container status \"910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d\": rpc error: code = NotFound desc = could not find container \"910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d\": container with ID starting with 910768255a707c323dd1a7cfd98b4f32558104c4b2fbdcbeba2c6d7db721632d not found: ID does not exist" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342100 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342177 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342201 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342205 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342324 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342345 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342350 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342403 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342432 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342485 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fnch\" (UniqueName: \"kubernetes.io/projected/ff062c59-745a-4664-b98f-f2fb669edf1f-kube-api-access-5fnch\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342502 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.342542 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.378367 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff062c59-745a-4664-b98f-f2fb669edf1f" (UID: "ff062c59-745a-4664-b98f-f2fb669edf1f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.443737 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff062c59-745a-4664-b98f-f2fb669edf1f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:31 crc kubenswrapper[4848]: I0128 12:50:31.478084 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.501448 4848 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.138:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188ee6095d7aba79 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:50:31.500601977 +0000 UTC m=+258.412819015,LastTimestamp:2026-01-28 12:50:31.500601977 +0000 UTC m=+258.412819015,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.535802 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:31Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:31Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:31Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:31Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:18138ad13c0ce7aec741afb6548dfcdea4ed713ab86fb632d61d11024bb26e33\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:19e2999ecbbb8cab07d01f6dc5abba647ba484961c73972a7cc782bc7b6dd669\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1675432702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:68c28a690c4c3482a63d6de9cf3b80304e983243444eb4d2c5fcaf5c051eb54b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:a273081c72178c20c79eca9b18dbb926d33a6bb826b215c14de6b31207e497ca\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202349806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:35ef22cdc5de4770d000187db87519d6c455220f78dd8f959ad8a1ff79d29272\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:ad441b86561c9626edb6ee12716deff56722d3a1f55eb3e1e310855efcee9888\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1187231476},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:420326d8488ceff2cde22ad8b85d739b0c254d47e703f7ddb1f08f77a48816a6\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:54817da328fa589491a3acbe80acdd88c0830dcc63aaafc08c3539925a1a3b03\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1180692192},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for 
node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.536589 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.536951 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.537206 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.537483 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.537507 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:50:31 crc kubenswrapper[4848]: E0128 12:50:31.790632 4848 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.138:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188ee6095d7aba79 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:50:31.500601977 +0000 UTC m=+258.412819015,LastTimestamp:2026-01-28 12:50:31.500601977 +0000 UTC m=+258.412819015,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.168334 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.169612 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.170298 4848 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059" exitCode=0 Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.170324 4848 generic.go:334] 
"Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9" exitCode=0 Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.170334 4848 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d" exitCode=0 Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.170343 4848 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e" exitCode=2 Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.170403 4848 scope.go:117] "RemoveContainer" containerID="fdf2f40c773c511e1b20b7ca4641a0c1be03da4458274f1accf482647fe5daf3" Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.172691 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"587592f675d31a835ffa36a223de4d844b13e82059ce53f6cdae2a6a6af1470f"} Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.172731 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d794f67befd30fb55b48f5af1463f933e7cd4d4b672007d71a065a9eae769cc7"} Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.174674 4848 generic.go:334] "Generic (PLEG): container finished" podID="6f754cdc-1dde-4331-842d-824d719b4255" containerID="ea3c6d016f6957e97e24f4cac3d755d032f6fdf69d51bef447b6c5b4187a085d" exitCode=0 Jan 28 12:50:32 crc kubenswrapper[4848]: I0128 12:50:32.175180 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"6f754cdc-1dde-4331-842d-824d719b4255","Type":"ContainerDied","Data":"ea3c6d016f6957e97e24f4cac3d755d032f6fdf69d51bef447b6c5b4187a085d"} Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.185236 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.705676 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.880231 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-var-lock\") pod \"6f754cdc-1dde-4331-842d-824d719b4255\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.880625 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-var-lock" (OuterVolumeSpecName: "var-lock") pod "6f754cdc-1dde-4331-842d-824d719b4255" (UID: "6f754cdc-1dde-4331-842d-824d719b4255"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.880977 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f754cdc-1dde-4331-842d-824d719b4255-kube-api-access\") pod \"6f754cdc-1dde-4331-842d-824d719b4255\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.881044 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-kubelet-dir\") pod \"6f754cdc-1dde-4331-842d-824d719b4255\" (UID: \"6f754cdc-1dde-4331-842d-824d719b4255\") " Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.881094 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6f754cdc-1dde-4331-842d-824d719b4255" (UID: "6f754cdc-1dde-4331-842d-824d719b4255"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.881428 4848 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.881449 4848 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6f754cdc-1dde-4331-842d-824d719b4255-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.891436 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f754cdc-1dde-4331-842d-824d719b4255-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6f754cdc-1dde-4331-842d-824d719b4255" (UID: "6f754cdc-1dde-4331-842d-824d719b4255"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4848]: I0128 12:50:33.982685 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f754cdc-1dde-4331-842d-824d719b4255-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.051072 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.052068 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.186062 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.186201 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). 
InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.186354 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.186484 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.186512 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.186603 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.187350 4848 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.187370 4848 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.187384 4848 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.196052 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.197393 4848 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb" exitCode=0 Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.197511 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.197520 4848 scope.go:117] "RemoveContainer" containerID="1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.199951 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"6f754cdc-1dde-4331-842d-824d719b4255","Type":"ContainerDied","Data":"2ba495056b289ee21088c426f7e3d697409a96c521e01f3e4f00049d4c113fcb"} Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.200025 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.200047 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ba495056b289ee21088c426f7e3d697409a96c521e01f3e4f00049d4c113fcb" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.222050 4848 scope.go:117] "RemoveContainer" containerID="b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.241463 4848 scope.go:117] "RemoveContainer" containerID="887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.263803 4848 scope.go:117] "RemoveContainer" containerID="f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.281655 4848 scope.go:117] "RemoveContainer" containerID="ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.301560 4848 scope.go:117] "RemoveContainer" containerID="678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.328553 4848 scope.go:117] "RemoveContainer" containerID="1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059" Jan 28 12:50:34 crc kubenswrapper[4848]: E0128 12:50:34.329635 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\": container with ID starting with 1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059 not found: ID does not exist" containerID="1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.329722 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059"} err="failed to get container status \"1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\": rpc error: code = NotFound desc = could not find container \"1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059\": container with ID starting with 1be0f4487b23417c6ec9ede5e4c3f641ef4a501ae23f234ef39ef17dcea51059 not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.329805 4848 scope.go:117] "RemoveContainer" containerID="b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9" Jan 28 12:50:34 crc kubenswrapper[4848]: E0128 12:50:34.330544 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\": 
container with ID starting with b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9 not found: ID does not exist" containerID="b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.330613 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9"} err="failed to get container status \"b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\": rpc error: code = NotFound desc = could not find container \"b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9\": container with ID starting with b15857e340d61b616f64c17f5e35fa1a06220834a9879f2e931c3ca571a721d9 not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.330657 4848 scope.go:117] "RemoveContainer" containerID="887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d" Jan 28 12:50:34 crc kubenswrapper[4848]: E0128 12:50:34.331213 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\": container with ID starting with 887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d not found: ID does not exist" containerID="887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.331259 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d"} err="failed to get container status \"887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\": rpc error: code = NotFound desc = could not find container \"887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d\": container with ID starting with 887162a1ed1272345aa322834b7e8090460fbff65b064acd1d2be7f109b4f14d not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.331289 4848 scope.go:117] "RemoveContainer" containerID="f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e" Jan 28 12:50:34 crc kubenswrapper[4848]: E0128 12:50:34.331740 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\": container with ID starting with f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e not found: ID does not exist" containerID="f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.331781 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e"} err="failed to get container status \"f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\": rpc error: code = NotFound desc = could not find container \"f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e\": container with ID starting with f36ae8fb01b3168a9c1acfe9d6ea4860a46257ed726704dc0283ffdc722e5b0e not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.331807 4848 scope.go:117] "RemoveContainer" containerID="ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb" Jan 28 12:50:34 crc kubenswrapper[4848]: E0128 12:50:34.332228 4848 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\": container with ID starting with ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb not found: ID does not exist" containerID="ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.332292 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb"} err="failed to get container status \"ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\": rpc error: code = NotFound desc = could not find container \"ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb\": container with ID starting with ee340d1d8d0bf661e542c10a3dd890479444cb85144b35edc2860f3ad4bf6edb not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.332316 4848 scope.go:117] "RemoveContainer" containerID="678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf" Jan 28 12:50:34 crc kubenswrapper[4848]: E0128 12:50:34.332737 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\": container with ID starting with 678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf not found: ID does not exist" containerID="678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.332780 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf"} err="failed to get container status \"678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\": rpc error: code = NotFound desc = could not find container \"678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf\": container with ID starting with 678f4b98041bb79a5e50b6e82d6598fb742b70e3c6abf88b6e90faeb94420ecf not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4848]: I0128 12:50:34.859201 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.184405 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.185090 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.185491 4848 status_manager.go:851] "Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.185859 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.186245 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.189565 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.189977 4848 status_manager.go:851] "Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.190387 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:36 crc kubenswrapper[4848]: I0128 12:50:36.190986 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.503334 4848 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.504567 4848 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.505464 4848 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.505834 
4848 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.506664 4848 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:40 crc kubenswrapper[4848]: I0128 12:50:40.506700 4848 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.506937 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="200ms" Jan 28 12:50:40 crc kubenswrapper[4848]: E0128 12:50:40.708130 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="400ms" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.109668 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="800ms" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.548806 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:41Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:41Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:41Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:50:41Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:18138ad13c0ce7aec741afb6548dfcdea4ed713ab86fb632d61d11024bb26e33\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:19e2999ecbbb8cab07d01f6dc5abba647ba484961c73972a7cc782bc7b6dd669\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1675432702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:68c28a690c4c3482a63d6de9cf3b80304e983243444eb4d2c5fcaf5c051eb54b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:a273081c72178c20c79eca9b18dbb926d33a6bb826b215c14de6b31207e497ca\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202349806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:35ef22cdc5de4770d000187db87519d6c455220f78dd8f959ad8a1ff79d29272\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:ad441b86561c9626edb6ee12716deff56722d3a1f55eb3e1e310855efcee9888\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1187231476},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:420326d8488ceff2cde22ad8b85d739b0c254d47e703f7ddb1f08f77a48816a6\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:54817da328fa589491a3acbe80acdd88c0830dcc63aaafc08c3539925a1a3b03\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1180692192},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.549221 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.549779 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.550503 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.550811 4848 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.550843 4848 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.792070 4848 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.138:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188ee6095d7aba79 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:50:31.500601977 +0000 UTC m=+258.412819015,LastTimestamp:2026-01-28 12:50:31.500601977 +0000 UTC m=+258.412819015,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 12:50:41 crc kubenswrapper[4848]: E0128 12:50:41.911269 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="1.6s" Jan 28 12:50:43 crc kubenswrapper[4848]: E0128 12:50:43.513102 4848 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.138:6443: connect: connection refused" interval="3.2s" Jan 28 12:50:44 crc kubenswrapper[4848]: I0128 12:50:44.851880 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:44 crc kubenswrapper[4848]: I0128 12:50:44.852750 4848 status_manager.go:851] 
"Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:44 crc kubenswrapper[4848]: I0128 12:50:44.853140 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:44 crc kubenswrapper[4848]: I0128 12:50:44.853541 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.217520 4848 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.217940 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.271175 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.271295 4848 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04" exitCode=1 Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.271328 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04"} Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.272047 4848 scope.go:117] "RemoveContainer" containerID="7171fdb7646dca792050006a66a50591dfc0ef7cb546540f2746e73e2d387c04" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.272337 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.272751 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" 
pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.273307 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.273791 4848 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.274483 4848 status_manager.go:851] "Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.849780 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.851066 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.851664 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.852019 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.852304 4848 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.853068 4848 status_manager.go:851] "Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.867798 4848 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.867835 4848 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:45 crc kubenswrapper[4848]: E0128 12:50:45.868414 4848 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:45 crc kubenswrapper[4848]: I0128 12:50:45.869068 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:45 crc kubenswrapper[4848]: W0128 12:50:45.905735 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-6f4cc1028e952ee437b7d6397d0b64d626e74c061237a9ff886d3633cf56270e WatchSource:0}: Error finding container 6f4cc1028e952ee437b7d6397d0b64d626e74c061237a9ff886d3633cf56270e: Status 404 returned error can't find the container with id 6f4cc1028e952ee437b7d6397d0b64d626e74c061237a9ff886d3633cf56270e Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.285235 4848 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="a373b996f3330c05bab883ce4b9155e62e3ef7b0d6bbc8341adf4e021d2f62d6" exitCode=0 Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.285380 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"a373b996f3330c05bab883ce4b9155e62e3ef7b0d6bbc8341adf4e021d2f62d6"} Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.285762 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"6f4cc1028e952ee437b7d6397d0b64d626e74c061237a9ff886d3633cf56270e"} Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.286152 4848 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.286185 4848 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.286720 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: E0128 12:50:46.286866 4848 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.287074 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.287412 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.287695 4848 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.287970 4848 status_manager.go:851] "Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.291675 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.291737 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"da70db8f5e071aa689de183296d58c2c0fb1f56fe04f0792025388789a22dd9f"} Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.292417 4848 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.292782 4848 status_manager.go:851] "Failed to get status for pod" podUID="6f754cdc-1dde-4331-842d-824d719b4255" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.293216 4848 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.293584 4848 status_manager.go:851] "Failed to get status for pod" podUID="32f32501-893c-4f73-ad89-3c4f37c7a422" pod="openshift-authentication/oauth-openshift-574b75df8-z2728" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-574b75df8-z2728\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:46 crc kubenswrapper[4848]: I0128 12:50:46.294098 4848 status_manager.go:851] "Failed to get status for pod" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" pod="openshift-marketplace/redhat-operators-4zt82" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-4zt82\": dial tcp 38.102.83.138:6443: connect: connection refused" Jan 28 12:50:47 crc kubenswrapper[4848]: I0128 12:50:47.304026 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e3ce8a732d998f095feebf58a185de87f4c3ebbd94486b618c650c1402349f27"} Jan 28 12:50:47 crc kubenswrapper[4848]: I0128 12:50:47.304070 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"74246e63e22390d0fc45616b35d09f4a85306ea5076ef33877bbe999d5ea709a"} Jan 28 12:50:47 crc kubenswrapper[4848]: I0128 12:50:47.304079 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"974f5a58503b3e95010588da1428dcd41275922988ac609aaf6aa53967922dae"} Jan 28 12:50:47 crc kubenswrapper[4848]: I0128 12:50:47.304088 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e4a304a8fb0ca2d98cada286a6d4d674445c6e58354d202c4836938e8326c482"} Jan 28 12:50:48 crc kubenswrapper[4848]: I0128 12:50:48.313860 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8bf40cd26eb65fb6e76752b0fab8a84485a3a4cf38a329451c997a965b80d0d9"} Jan 28 12:50:48 crc kubenswrapper[4848]: I0128 12:50:48.314286 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:48 crc kubenswrapper[4848]: I0128 12:50:48.314239 4848 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:48 crc kubenswrapper[4848]: I0128 12:50:48.314335 4848 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:50 crc kubenswrapper[4848]: I0128 12:50:50.478013 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:50:50 crc kubenswrapper[4848]: I0128 12:50:50.870318 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" 
Jan 28 12:50:50 crc kubenswrapper[4848]: I0128 12:50:50.870390 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:50 crc kubenswrapper[4848]: I0128 12:50:50.880984 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:53 crc kubenswrapper[4848]: I0128 12:50:53.325747 4848 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:54 crc kubenswrapper[4848]: I0128 12:50:54.350218 4848 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:54 crc kubenswrapper[4848]: I0128 12:50:54.350273 4848 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:54 crc kubenswrapper[4848]: I0128 12:50:54.353761 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:50:54 crc kubenswrapper[4848]: I0128 12:50:54.806191 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:50:54 crc kubenswrapper[4848]: I0128 12:50:54.810338 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:50:54 crc kubenswrapper[4848]: I0128 12:50:54.874871 4848 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="58c410d1-e8e2-416e-97b5-0b1d09713114" Jan 28 12:50:55 crc kubenswrapper[4848]: I0128 12:50:55.356484 4848 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:55 crc kubenswrapper[4848]: I0128 12:50:55.356524 4848 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5b93da01-044f-4540-8248-6d19f14ce06d" Jan 28 12:50:55 crc kubenswrapper[4848]: I0128 12:50:55.360073 4848 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="58c410d1-e8e2-416e-97b5-0b1d09713114" Jan 28 12:50:55 crc kubenswrapper[4848]: I0128 12:50:55.364795 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:51:03 crc kubenswrapper[4848]: I0128 12:51:03.108873 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 12:51:03 crc kubenswrapper[4848]: I0128 12:51:03.773660 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 12:51:04 crc kubenswrapper[4848]: I0128 12:51:04.298831 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 12:51:04 crc kubenswrapper[4848]: I0128 12:51:04.314353 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 12:51:04 crc kubenswrapper[4848]: I0128 12:51:04.363599 4848 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 12:51:04 crc kubenswrapper[4848]: I0128 12:51:04.753600 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 12:51:04 crc kubenswrapper[4848]: I0128 12:51:04.838962 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 28 12:51:04 crc kubenswrapper[4848]: I0128 12:51:04.985755 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.096675 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.140989 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.201415 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.522278 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.541141 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.544749 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.719079 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.744717 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.816260 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 12:51:05 crc kubenswrapper[4848]: I0128 12:51:05.879107 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.015524 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.107425 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.197662 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.306906 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.393780 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.463104 4848 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console-operator"/"serving-cert" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.474358 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.581862 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.583790 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.672805 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.720635 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.737929 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.764663 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.776790 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.845472 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 12:51:06 crc kubenswrapper[4848]: I0128 12:51:06.883205 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.009470 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.063492 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.077444 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.198707 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.232805 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.406476 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.409383 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.603026 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.610842 4848 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.631290 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.673750 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.697168 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.711125 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.722542 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.727443 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.753296 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.855326 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.869975 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.879732 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.893658 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 12:51:07 crc kubenswrapper[4848]: I0128 12:51:07.975693 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.008759 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.021611 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.068011 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.092575 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.110364 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.165416 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 
12:51:08.198924 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.465687 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.492001 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.512719 4848 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.521516 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.572989 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.598279 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.647607 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.749312 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.776331 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.790633 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.796775 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.871611 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.889911 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 12:51:08 crc kubenswrapper[4848]: I0128 12:51:08.920902 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.012376 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.016828 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.046374 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.062203 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.089630 4848 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.199849 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.307861 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.319800 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.370136 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.398445 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.423603 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.499748 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.522684 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.559504 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.666013 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.818991 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.847590 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.926161 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.954314 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.961511 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 12:51:09 crc kubenswrapper[4848]: I0128 12:51:09.963539 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.023663 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.054544 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 
12:51:10.101476 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.456015 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.461402 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.481941 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.588184 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.607095 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.651897 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.716442 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.772083 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.797868 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.839081 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.893141 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.905930 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 12:51:10 crc kubenswrapper[4848]: I0128 12:51:10.982573 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.006838 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.012682 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.128098 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.162875 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.191710 4848 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.205123 4848 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.283479 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.464050 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.585054 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.588787 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.656061 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.666166 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.747997 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.748416 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.867726 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.926668 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.941796 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 12:51:11 crc kubenswrapper[4848]: I0128 12:51:11.992961 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.004734 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.027989 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.130770 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.181966 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.224283 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.258661 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 
12:51:12.295974 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.421840 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.428287 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.429357 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.440764 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.471804 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.571607 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.621778 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.628581 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.798900 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.879898 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.908581 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.944949 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.950076 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.958913 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 12:51:12 crc kubenswrapper[4848]: I0128 12:51:12.971321 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.000794 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.003957 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.097691 4848 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.134636 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.187195 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.299064 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.339092 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.442411 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.801623 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.828826 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 12:51:13 crc kubenswrapper[4848]: I0128 12:51:13.887135 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.161301 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.213408 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.335642 4848 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.337129 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.389008 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.417173 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.426390 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.525088 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.532620 4848 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.568097 4848 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.578694 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.620061 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.642113 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.713566 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.823204 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.830333 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.896770 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.908625 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.959108 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 12:51:14 crc kubenswrapper[4848]: I0128 12:51:14.964118 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.096438 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.116657 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.116723 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.155132 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.158083 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.294240 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.333110 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.362815 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.398767 4848 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.521182 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.594793 4848 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.608800 4848 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.666334 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.797325 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.799875 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.822293 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.823868 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.835671 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.840174 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.878783 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.887825 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.955236 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 12:51:15 crc kubenswrapper[4848]: I0128 12:51:15.991850 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.011584 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.109710 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.146044 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.175016 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.229717 4848 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-dns"/"dns-default-metrics-tls" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.251139 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.349478 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.485498 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.494669 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.532834 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.564823 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.609708 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.636537 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.703967 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.783601 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.844609 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.946877 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 12:51:16 crc kubenswrapper[4848]: I0128 12:51:16.996619 4848 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.001652 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=46.001627867 podStartE2EDuration="46.001627867s" podCreationTimestamp="2026-01-28 12:50:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:53.35001466 +0000 UTC m=+280.262231698" watchObservedRunningTime="2026-01-28 12:51:17.001627867 +0000 UTC m=+303.913844905" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.002650 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4zt82","openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.002723 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.008391 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 
12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.036943 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.036911106 podStartE2EDuration="24.036911106s" podCreationTimestamp="2026-01-28 12:50:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:17.02734852 +0000 UTC m=+303.939565568" watchObservedRunningTime="2026-01-28 12:51:17.036911106 +0000 UTC m=+303.949128144" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.129472 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.278331 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.352946 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.406842 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.477322 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.490473 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.674513 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.686898 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.784238 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.888593 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 12:51:17 crc kubenswrapper[4848]: I0128 12:51:17.946459 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.009627 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.039785 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.124992 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.181592 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.212566 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 12:51:18 
crc kubenswrapper[4848]: I0128 12:51:18.347684 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.349206 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.414218 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.419327 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.454135 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.503970 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.522266 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.960514 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.960671 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.961021 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 12:51:18 crc kubenswrapper[4848]: I0128 12:51:18.970670 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff062c59-745a-4664-b98f-f2fb669edf1f" path="/var/lib/kubelet/pods/ff062c59-745a-4664-b98f-f2fb669edf1f/volumes" Jan 28 12:51:19 crc kubenswrapper[4848]: I0128 12:51:19.795958 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 12:51:27 crc kubenswrapper[4848]: I0128 12:51:27.209893 4848 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:51:27 crc kubenswrapper[4848]: I0128 12:51:27.210998 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://587592f675d31a835ffa36a223de4d844b13e82059ce53f6cdae2a6a6af1470f" gracePeriod=5 Jan 28 12:51:31 crc kubenswrapper[4848]: I0128 12:51:31.591494 4848 generic.go:334] "Generic (PLEG): container finished" podID="0d553491-aa2a-495d-b02c-73a52d29278b" containerID="11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0" exitCode=0 Jan 28 12:51:31 crc kubenswrapper[4848]: I0128 12:51:31.591718 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" event={"ID":"0d553491-aa2a-495d-b02c-73a52d29278b","Type":"ContainerDied","Data":"11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0"} Jan 28 12:51:31 crc kubenswrapper[4848]: 
I0128 12:51:31.593539 4848 scope.go:117] "RemoveContainer" containerID="11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.600864 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.601138 4848 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="587592f675d31a835ffa36a223de4d844b13e82059ce53f6cdae2a6a6af1470f" exitCode=137
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.603938 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" event={"ID":"0d553491-aa2a-495d-b02c-73a52d29278b","Type":"ContainerStarted","Data":"8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad"}
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.604326 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.606071 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.782724 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.782798 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.856548 4848 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID=""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.867504 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.867537 4848 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="5c5c7d5b-4327-482a-ade4-3101e266d2d0"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870045 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870067 4848 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="5c5c7d5b-4327-482a-ade4-3101e266d2d0"
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870410 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870524 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870589 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870593 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870623 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870644 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870682 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870745 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870749 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870977 4848 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.870994 4848 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.871680 4848 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.871705 4848 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.878287 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:32 crc kubenswrapper[4848]: I0128 12:51:32.972996 4848 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:33 crc kubenswrapper[4848]: I0128 12:51:33.610078 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 28 12:51:33 crc kubenswrapper[4848]: I0128 12:51:33.610397 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:51:33 crc kubenswrapper[4848]: I0128 12:51:33.610487 4848 scope.go:117] "RemoveContainer" containerID="587592f675d31a835ffa36a223de4d844b13e82059ce53f6cdae2a6a6af1470f" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.435036 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-857b45fc74-sfl86"] Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.436681 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" podUID="f1f4e51e-7e30-400d-a472-c398091b68d6" containerName="controller-manager" containerID="cri-o://2c30db9a8880dc77c5a2ebb9c106d407c59c1a21e0041989f6aa6a6fd5b4a1f1" gracePeriod=30 Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.538104 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"] Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.538388 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" podUID="0f895523-c590-4a7e-8070-7434eac8246e" containerName="route-controller-manager" containerID="cri-o://934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d" gracePeriod=30 Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.618030 4848 generic.go:334] "Generic (PLEG): container finished" podID="f1f4e51e-7e30-400d-a472-c398091b68d6" containerID="2c30db9a8880dc77c5a2ebb9c106d407c59c1a21e0041989f6aa6a6fd5b4a1f1" exitCode=0 Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.618349 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" event={"ID":"f1f4e51e-7e30-400d-a472-c398091b68d6","Type":"ContainerDied","Data":"2c30db9a8880dc77c5a2ebb9c106d407c59c1a21e0041989f6aa6a6fd5b4a1f1"} Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.859851 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.864399 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.899036 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-config\") pod \"f1f4e51e-7e30-400d-a472-c398091b68d6\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.899521 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1f4e51e-7e30-400d-a472-c398091b68d6-serving-cert\") pod \"f1f4e51e-7e30-400d-a472-c398091b68d6\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.899799 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-proxy-ca-bundles\") pod \"f1f4e51e-7e30-400d-a472-c398091b68d6\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.899933 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh2dp\" (UniqueName: \"kubernetes.io/projected/f1f4e51e-7e30-400d-a472-c398091b68d6-kube-api-access-bh2dp\") pod \"f1f4e51e-7e30-400d-a472-c398091b68d6\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.900093 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-client-ca\") pod \"f1f4e51e-7e30-400d-a472-c398091b68d6\" (UID: \"f1f4e51e-7e30-400d-a472-c398091b68d6\") " Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.900956 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-config" (OuterVolumeSpecName: "config") pod "f1f4e51e-7e30-400d-a472-c398091b68d6" (UID: "f1f4e51e-7e30-400d-a472-c398091b68d6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.901612 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f1f4e51e-7e30-400d-a472-c398091b68d6" (UID: "f1f4e51e-7e30-400d-a472-c398091b68d6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.902531 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-client-ca" (OuterVolumeSpecName: "client-ca") pod "f1f4e51e-7e30-400d-a472-c398091b68d6" (UID: "f1f4e51e-7e30-400d-a472-c398091b68d6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.907089 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f4e51e-7e30-400d-a472-c398091b68d6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f1f4e51e-7e30-400d-a472-c398091b68d6" (UID: "f1f4e51e-7e30-400d-a472-c398091b68d6"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.907183 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1f4e51e-7e30-400d-a472-c398091b68d6-kube-api-access-bh2dp" (OuterVolumeSpecName: "kube-api-access-bh2dp") pod "f1f4e51e-7e30-400d-a472-c398091b68d6" (UID: "f1f4e51e-7e30-400d-a472-c398091b68d6"). InnerVolumeSpecName "kube-api-access-bh2dp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:34 crc kubenswrapper[4848]: I0128 12:51:34.915436 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.001774 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-config\") pod \"0f895523-c590-4a7e-8070-7434eac8246e\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.001840 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztjw5\" (UniqueName: \"kubernetes.io/projected/0f895523-c590-4a7e-8070-7434eac8246e-kube-api-access-ztjw5\") pod \"0f895523-c590-4a7e-8070-7434eac8246e\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.001885 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-client-ca\") pod \"0f895523-c590-4a7e-8070-7434eac8246e\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.001931 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f895523-c590-4a7e-8070-7434eac8246e-serving-cert\") pod \"0f895523-c590-4a7e-8070-7434eac8246e\" (UID: \"0f895523-c590-4a7e-8070-7434eac8246e\") " Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002210 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002224 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002233 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1f4e51e-7e30-400d-a472-c398091b68d6-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002243 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f1f4e51e-7e30-400d-a472-c398091b68d6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002269 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh2dp\" (UniqueName: \"kubernetes.io/projected/f1f4e51e-7e30-400d-a472-c398091b68d6-kube-api-access-bh2dp\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002785 4848 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-config" (OuterVolumeSpecName: "config") pod "0f895523-c590-4a7e-8070-7434eac8246e" (UID: "0f895523-c590-4a7e-8070-7434eac8246e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.002927 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-client-ca" (OuterVolumeSpecName: "client-ca") pod "0f895523-c590-4a7e-8070-7434eac8246e" (UID: "0f895523-c590-4a7e-8070-7434eac8246e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.006003 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f895523-c590-4a7e-8070-7434eac8246e-kube-api-access-ztjw5" (OuterVolumeSpecName: "kube-api-access-ztjw5") pod "0f895523-c590-4a7e-8070-7434eac8246e" (UID: "0f895523-c590-4a7e-8070-7434eac8246e"). InnerVolumeSpecName "kube-api-access-ztjw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.006548 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f895523-c590-4a7e-8070-7434eac8246e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0f895523-c590-4a7e-8070-7434eac8246e" (UID: "0f895523-c590-4a7e-8070-7434eac8246e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.103852 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.103893 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztjw5\" (UniqueName: \"kubernetes.io/projected/0f895523-c590-4a7e-8070-7434eac8246e-kube-api-access-ztjw5\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.103909 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f895523-c590-4a7e-8070-7434eac8246e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.103921 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f895523-c590-4a7e-8070-7434eac8246e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.627392 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.627391 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-857b45fc74-sfl86" event={"ID":"f1f4e51e-7e30-400d-a472-c398091b68d6","Type":"ContainerDied","Data":"9f16d55e3b06b71fddd8051c712ac2bd3d2da9d7b8c1b2cd504080dfa8aac92d"} Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.627649 4848 scope.go:117] "RemoveContainer" containerID="2c30db9a8880dc77c5a2ebb9c106d407c59c1a21e0041989f6aa6a6fd5b4a1f1" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.630872 4848 generic.go:334] "Generic (PLEG): container finished" podID="0f895523-c590-4a7e-8070-7434eac8246e" containerID="934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d" exitCode=0 Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.630935 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.630967 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" event={"ID":"0f895523-c590-4a7e-8070-7434eac8246e","Type":"ContainerDied","Data":"934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d"} Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.631027 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-fb5964489-2598k" event={"ID":"0f895523-c590-4a7e-8070-7434eac8246e","Type":"ContainerDied","Data":"e038dfc1d0f7561a2add9345d26ead8e98b0a757adeec4ea224140b5b146cd2f"} Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.646637 4848 scope.go:117] "RemoveContainer" containerID="934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.664073 4848 scope.go:117] "RemoveContainer" containerID="934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d" Jan 28 12:51:35 crc kubenswrapper[4848]: E0128 12:51:35.665028 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d\": container with ID starting with 934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d not found: ID does not exist" containerID="934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.665106 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d"} err="failed to get container status \"934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d\": rpc error: code = NotFound desc = could not find container \"934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d\": container with ID starting with 934654893b4b33a850f2b0815f204535bb78a64f97639a36a0b288c7a30a3e2d not found: ID does not exist" Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.669032 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-857b45fc74-sfl86"] Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.673932 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-controller-manager/controller-manager-857b45fc74-sfl86"] Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.677442 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"] Jan 28 12:51:35 crc kubenswrapper[4848]: I0128 12:51:35.680641 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-fb5964489-2598k"] Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.197575 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-zr2w7"] Jan 28 12:51:36 crc kubenswrapper[4848]: E0128 12:51:36.197941 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f895523-c590-4a7e-8070-7434eac8246e" containerName="route-controller-manager" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.197959 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f895523-c590-4a7e-8070-7434eac8246e" containerName="route-controller-manager" Jan 28 12:51:36 crc kubenswrapper[4848]: E0128 12:51:36.197982 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.197989 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 12:51:36 crc kubenswrapper[4848]: E0128 12:51:36.198010 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f4e51e-7e30-400d-a472-c398091b68d6" containerName="controller-manager" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198028 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f4e51e-7e30-400d-a472-c398091b68d6" containerName="controller-manager" Jan 28 12:51:36 crc kubenswrapper[4848]: E0128 12:51:36.198037 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f754cdc-1dde-4331-842d-824d719b4255" containerName="installer" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198044 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f754cdc-1dde-4331-842d-824d719b4255" containerName="installer" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198150 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f754cdc-1dde-4331-842d-824d719b4255" containerName="installer" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198164 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198175 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1f4e51e-7e30-400d-a472-c398091b68d6" containerName="controller-manager" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198183 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f895523-c590-4a7e-8070-7434eac8246e" containerName="route-controller-manager" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.198708 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.203511 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.203748 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.205211 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.205501 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.205743 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.205871 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.212349 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"] Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.213705 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.213702 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.216191 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.216676 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.218795 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.219008 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.219424 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-client-ca\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.219532 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxt67\" (UniqueName: \"kubernetes.io/projected/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-kube-api-access-rxt67\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.219556 4848 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-proxy-ca-bundles\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.219575 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-serving-cert\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.219603 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-config\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.220187 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.220559 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.226113 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-zr2w7"] Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.231146 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"] Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320421 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxt67\" (UniqueName: \"kubernetes.io/projected/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-kube-api-access-rxt67\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320464 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-client-ca\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320487 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-serving-cert\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320504 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-proxy-ca-bundles\") 
pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320531 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-config\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320568 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-client-ca\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320585 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-config\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320608 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51a76a23-8af8-439c-9a57-46b7845cdfab-serving-cert\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.320643 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26r67\" (UniqueName: \"kubernetes.io/projected/51a76a23-8af8-439c-9a57-46b7845cdfab-kube-api-access-26r67\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.321849 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-client-ca\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.321907 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-config\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.323363 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-proxy-ca-bundles\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " 
pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.323929 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-serving-cert\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.336814 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxt67\" (UniqueName: \"kubernetes.io/projected/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-kube-api-access-rxt67\") pod \"controller-manager-8697fb7788-zr2w7\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") " pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.421476 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-config\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.421541 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51a76a23-8af8-439c-9a57-46b7845cdfab-serving-cert\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.421589 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26r67\" (UniqueName: \"kubernetes.io/projected/51a76a23-8af8-439c-9a57-46b7845cdfab-kube-api-access-26r67\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.421655 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-client-ca\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.423186 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-client-ca\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.423229 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-config\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.424992 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51a76a23-8af8-439c-9a57-46b7845cdfab-serving-cert\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.437464 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26r67\" (UniqueName: \"kubernetes.io/projected/51a76a23-8af8-439c-9a57-46b7845cdfab-kube-api-access-26r67\") pod \"route-controller-manager-74d95f9b48-jdvwk\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.536902 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.544011 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.834563 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"] Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.863020 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f895523-c590-4a7e-8070-7434eac8246e" path="/var/lib/kubelet/pods/0f895523-c590-4a7e-8070-7434eac8246e/volumes" Jan 28 12:51:36 crc kubenswrapper[4848]: I0128 12:51:36.864077 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1f4e51e-7e30-400d-a472-c398091b68d6" path="/var/lib/kubelet/pods/f1f4e51e-7e30-400d-a472-c398091b68d6/volumes" Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.085338 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-zr2w7"] Jan 28 12:51:37 crc kubenswrapper[4848]: W0128 12:51:37.090511 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66b6531a_60c9_4ca0_b5b4_8d981353c6e9.slice/crio-4341c976df8afaf529ae80bb699123f6832764c84ac71f678399ee2b17bcee8b WatchSource:0}: Error finding container 4341c976df8afaf529ae80bb699123f6832764c84ac71f678399ee2b17bcee8b: Status 404 returned error can't find the container with id 4341c976df8afaf529ae80bb699123f6832764c84ac71f678399ee2b17bcee8b Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.703130 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" event={"ID":"66b6531a-60c9-4ca0-b5b4-8d981353c6e9","Type":"ContainerStarted","Data":"06d1a1ed7d9cb545f31a3270c49bcdd2e945423623de7c3538f376b3ced9f17e"} Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.703173 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" event={"ID":"66b6531a-60c9-4ca0-b5b4-8d981353c6e9","Type":"ContainerStarted","Data":"4341c976df8afaf529ae80bb699123f6832764c84ac71f678399ee2b17bcee8b"} Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.703460 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 
28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.704713 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" event={"ID":"51a76a23-8af8-439c-9a57-46b7845cdfab","Type":"ContainerStarted","Data":"acb00bc1a8c8c70f81c62d52a1c09038fa40894e823a984906b438fae0ed0402"} Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.704757 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" event={"ID":"51a76a23-8af8-439c-9a57-46b7845cdfab","Type":"ContainerStarted","Data":"4795fd7aa0abf35dcc7f2432ecc3f28373fe3a3d2ed350ae5d721c3dd5fd283c"} Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.705238 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.709153 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.712667 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.737161 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" podStartSLOduration=3.737145581 podStartE2EDuration="3.737145581s" podCreationTimestamp="2026-01-28 12:51:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:37.73567934 +0000 UTC m=+324.647896408" watchObservedRunningTime="2026-01-28 12:51:37.737145581 +0000 UTC m=+324.649362609" Jan 28 12:51:37 crc kubenswrapper[4848]: I0128 12:51:37.833936 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" podStartSLOduration=3.833917489 podStartE2EDuration="3.833917489s" podCreationTimestamp="2026-01-28 12:51:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:37.76509351 +0000 UTC m=+324.677310568" watchObservedRunningTime="2026-01-28 12:51:37.833917489 +0000 UTC m=+324.746134527" Jan 28 12:51:47 crc kubenswrapper[4848]: I0128 12:51:47.521101 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.420591 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-zr2w7"] Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.421303 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" podUID="66b6531a-60c9-4ca0-b5b4-8d981353c6e9" containerName="controller-manager" containerID="cri-o://06d1a1ed7d9cb545f31a3270c49bcdd2e945423623de7c3538f376b3ced9f17e" gracePeriod=30 Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.433420 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"] Jan 28 12:51:54 crc 
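The two pod_startup_latency_tracker entries above record the kubelet's startup SLO metric: both replacement pods went from creation (12:51:34) to observed-running in roughly 3.7-3.8 s, with no image-pull time at all (the pull timestamps are the zero value 0001-01-01, i.e. the images were already present). A quick sketch for tabulating these durations across a log; the regex and input path are illustrative assumptions:

import re

SLO = re.compile(r'"Observed pod startup duration" pod="([^"]+)" podStartSLOduration=([0-9.]+)')

def startup_durations(lines):
    """Yield (pod, seconds) for every startup-latency entry in the log."""
    for line in lines:
        if m := SLO.search(line):
            yield m.group(1), float(m.group(2))

if __name__ == "__main__":
    with open("kubelet.log") as f:  # assumed local copy of this log
        for pod, secs in sorted(startup_durations(f), key=lambda kv: -kv[1]):
            print(f"{secs:8.3f}s  {pod}")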
Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.808869 4848 generic.go:334] "Generic (PLEG): container finished" podID="66b6531a-60c9-4ca0-b5b4-8d981353c6e9" containerID="06d1a1ed7d9cb545f31a3270c49bcdd2e945423623de7c3538f376b3ced9f17e" exitCode=0
Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.808960 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" event={"ID":"66b6531a-60c9-4ca0-b5b4-8d981353c6e9","Type":"ContainerDied","Data":"06d1a1ed7d9cb545f31a3270c49bcdd2e945423623de7c3538f376b3ced9f17e"}
Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.815789 4848 generic.go:334] "Generic (PLEG): container finished" podID="51a76a23-8af8-439c-9a57-46b7845cdfab" containerID="acb00bc1a8c8c70f81c62d52a1c09038fa40894e823a984906b438fae0ed0402" exitCode=0
Jan 28 12:51:54 crc kubenswrapper[4848]: I0128 12:51:54.815882 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" event={"ID":"51a76a23-8af8-439c-9a57-46b7845cdfab","Type":"ContainerDied","Data":"acb00bc1a8c8c70f81c62d52a1c09038fa40894e823a984906b438fae0ed0402"}
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.056941 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.062030 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7"
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243463 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-config\") pod \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243520 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-serving-cert\") pod \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243546 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51a76a23-8af8-439c-9a57-46b7845cdfab-serving-cert\") pod \"51a76a23-8af8-439c-9a57-46b7845cdfab\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243590 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-config\") pod \"51a76a23-8af8-439c-9a57-46b7845cdfab\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243641 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxt67\" (UniqueName: \"kubernetes.io/projected/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-kube-api-access-rxt67\") pod \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243662 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-client-ca\") pod \"51a76a23-8af8-439c-9a57-46b7845cdfab\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243690 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-client-ca\") pod \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243715 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-proxy-ca-bundles\") pod \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\" (UID: \"66b6531a-60c9-4ca0-b5b4-8d981353c6e9\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.243739 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26r67\" (UniqueName: \"kubernetes.io/projected/51a76a23-8af8-439c-9a57-46b7845cdfab-kube-api-access-26r67\") pod \"51a76a23-8af8-439c-9a57-46b7845cdfab\" (UID: \"51a76a23-8af8-439c-9a57-46b7845cdfab\") "
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.244547 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-config" (OuterVolumeSpecName: "config") pod "66b6531a-60c9-4ca0-b5b4-8d981353c6e9" (UID: "66b6531a-60c9-4ca0-b5b4-8d981353c6e9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.244518 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-config" (OuterVolumeSpecName: "config") pod "51a76a23-8af8-439c-9a57-46b7845cdfab" (UID: "51a76a23-8af8-439c-9a57-46b7845cdfab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.244841 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-client-ca" (OuterVolumeSpecName: "client-ca") pod "66b6531a-60c9-4ca0-b5b4-8d981353c6e9" (UID: "66b6531a-60c9-4ca0-b5b4-8d981353c6e9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.245228 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "66b6531a-60c9-4ca0-b5b4-8d981353c6e9" (UID: "66b6531a-60c9-4ca0-b5b4-8d981353c6e9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.245232 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-client-ca" (OuterVolumeSpecName: "client-ca") pod "51a76a23-8af8-439c-9a57-46b7845cdfab" (UID: "51a76a23-8af8-439c-9a57-46b7845cdfab"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.249543 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-kube-api-access-rxt67" (OuterVolumeSpecName: "kube-api-access-rxt67") pod "66b6531a-60c9-4ca0-b5b4-8d981353c6e9" (UID: "66b6531a-60c9-4ca0-b5b4-8d981353c6e9"). InnerVolumeSpecName "kube-api-access-rxt67". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.249703 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51a76a23-8af8-439c-9a57-46b7845cdfab-kube-api-access-26r67" (OuterVolumeSpecName: "kube-api-access-26r67") pod "51a76a23-8af8-439c-9a57-46b7845cdfab" (UID: "51a76a23-8af8-439c-9a57-46b7845cdfab"). InnerVolumeSpecName "kube-api-access-26r67". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.250193 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "66b6531a-60c9-4ca0-b5b4-8d981353c6e9" (UID: "66b6531a-60c9-4ca0-b5b4-8d981353c6e9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.251403 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a76a23-8af8-439c-9a57-46b7845cdfab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "51a76a23-8af8-439c-9a57-46b7845cdfab" (UID: "51a76a23-8af8-439c-9a57-46b7845cdfab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345600 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345639 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345678 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26r67\" (UniqueName: \"kubernetes.io/projected/51a76a23-8af8-439c-9a57-46b7845cdfab-kube-api-access-26r67\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345687 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-config\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345695 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345705 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/51a76a23-8af8-439c-9a57-46b7845cdfab-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345712 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-config\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345720 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxt67\" (UniqueName: \"kubernetes.io/projected/66b6531a-60c9-4ca0-b5b4-8d981353c6e9-kube-api-access-rxt67\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.345728 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/51a76a23-8af8-439c-9a57-46b7845cdfab-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.822578 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.822520 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk" event={"ID":"51a76a23-8af8-439c-9a57-46b7845cdfab","Type":"ContainerDied","Data":"4795fd7aa0abf35dcc7f2432ecc3f28373fe3a3d2ed350ae5d721c3dd5fd283c"} Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.822739 4848 scope.go:117] "RemoveContainer" containerID="acb00bc1a8c8c70f81c62d52a1c09038fa40894e823a984906b438fae0ed0402" Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.825160 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" event={"ID":"66b6531a-60c9-4ca0-b5b4-8d981353c6e9","Type":"ContainerDied","Data":"4341c976df8afaf529ae80bb699123f6832764c84ac71f678399ee2b17bcee8b"} Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.825234 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8697fb7788-zr2w7" Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.842653 4848 scope.go:117] "RemoveContainer" containerID="06d1a1ed7d9cb545f31a3270c49bcdd2e945423623de7c3538f376b3ced9f17e" Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.858204 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-zr2w7"] Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.861853 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-zr2w7"] Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.869383 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"] Jan 28 12:51:55 crc kubenswrapper[4848]: I0128 12:51:55.874063 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-jdvwk"] Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.276567 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl"] Jan 28 12:51:56 crc kubenswrapper[4848]: E0128 12:51:56.277100 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66b6531a-60c9-4ca0-b5b4-8d981353c6e9" containerName="controller-manager" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.277116 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="66b6531a-60c9-4ca0-b5b4-8d981353c6e9" containerName="controller-manager" Jan 28 12:51:56 crc kubenswrapper[4848]: E0128 12:51:56.277131 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a76a23-8af8-439c-9a57-46b7845cdfab" containerName="route-controller-manager" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.277139 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a76a23-8af8-439c-9a57-46b7845cdfab" containerName="route-controller-manager" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.277265 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="66b6531a-60c9-4ca0-b5b4-8d981353c6e9" containerName="controller-manager" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.277294 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="51a76a23-8af8-439c-9a57-46b7845cdfab" 
containerName="route-controller-manager" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.277688 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.280333 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk"] Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.281041 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.282441 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.282592 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.282657 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.282656 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.282980 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.283057 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.283364 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.291954 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.292208 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.292612 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.292859 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.292984 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.292930 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.298329 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk"] Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.301682 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl"] Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 
12:51:56.459397 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6658\" (UniqueName: \"kubernetes.io/projected/3563f548-9b84-485d-992f-c2f4998bf348-kube-api-access-c6658\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459595 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-client-ca\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459647 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-client-ca\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459730 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-proxy-ca-bundles\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459796 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-config\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459825 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rv5b\" (UniqueName: \"kubernetes.io/projected/29bee547-2bd7-4b10-852c-3178a224dea0-kube-api-access-4rv5b\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459858 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-config\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.459877 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29bee547-2bd7-4b10-852c-3178a224dea0-serving-cert\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc 
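
Note: the kube-api-access-* volumes being verified here are projected volumes combining a bound service account token, the kube-root-ca.crt ConfigMap, and the namespace; the kubelet obtains the token via the TokenRequest API. For ConfigMap, Secret, and projected volumes there is no external attach step, so VerifyControllerAttachedVolume simply records the volume as attached before mounting begins. The sketch below issues the same kind of token request from an ordinary client; the "default"/"default" namespace and service account are placeholders.

    package main

    import (
    	"context"
    	"fmt"

    	authenticationv1 "k8s.io/api/authentication/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	exp := int64(3600) // kubelet-requested tokens are similarly time-bound
    	tr, err := cs.CoreV1().ServiceAccounts("default").CreateToken(
    		context.TODO(), "default",
    		&authenticationv1.TokenRequest{
    			Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &exp},
    		}, metav1.CreateOptions{})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("token expires:", tr.Status.ExpirationTimestamp)
    }
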
kubenswrapper[4848]: I0128 12:51:56.459899 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3563f548-9b84-485d-992f-c2f4998bf348-serving-cert\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561165 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3563f548-9b84-485d-992f-c2f4998bf348-serving-cert\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561232 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6658\" (UniqueName: \"kubernetes.io/projected/3563f548-9b84-485d-992f-c2f4998bf348-kube-api-access-c6658\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561274 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-client-ca\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561291 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-client-ca\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561312 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-proxy-ca-bundles\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561342 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-config\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561359 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rv5b\" (UniqueName: \"kubernetes.io/projected/29bee547-2bd7-4b10-852c-3178a224dea0-kube-api-access-4rv5b\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561386 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-config\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.561402 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29bee547-2bd7-4b10-852c-3178a224dea0-serving-cert\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.563358 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-client-ca\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.565301 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-client-ca\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.565699 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29bee547-2bd7-4b10-852c-3178a224dea0-serving-cert\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.566117 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3563f548-9b84-485d-992f-c2f4998bf348-serving-cert\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.566494 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-config\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.567823 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-config\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.567864 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-proxy-ca-bundles\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " 
pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.585802 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rv5b\" (UniqueName: \"kubernetes.io/projected/29bee547-2bd7-4b10-852c-3178a224dea0-kube-api-access-4rv5b\") pod \"route-controller-manager-57d4b786c5-rl5jk\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.585810 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6658\" (UniqueName: \"kubernetes.io/projected/3563f548-9b84-485d-992f-c2f4998bf348-kube-api-access-c6658\") pod \"controller-manager-5f4c649f4c-9qbpl\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.600618 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.610906 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.857151 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51a76a23-8af8-439c-9a57-46b7845cdfab" path="/var/lib/kubelet/pods/51a76a23-8af8-439c-9a57-46b7845cdfab/volumes" Jan 28 12:51:56 crc kubenswrapper[4848]: I0128 12:51:56.857830 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66b6531a-60c9-4ca0-b5b4-8d981353c6e9" path="/var/lib/kubelet/pods/66b6531a-60c9-4ca0-b5b4-8d981353c6e9/volumes" Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.011925 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk"] Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.058334 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl"] Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.844237 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" event={"ID":"29bee547-2bd7-4b10-852c-3178a224dea0","Type":"ContainerStarted","Data":"114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230"} Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.844323 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" event={"ID":"29bee547-2bd7-4b10-852c-3178a224dea0","Type":"ContainerStarted","Data":"ca7464dbcf6f4d95875de5ae647fe5f9b9abd1cda4b98b8b84ba5117196e0c3c"} Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.844844 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.846617 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" event={"ID":"3563f548-9b84-485d-992f-c2f4998bf348","Type":"ContainerStarted","Data":"fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20"} Jan 28 12:51:57 crc 
kubenswrapper[4848]: I0128 12:51:57.846676 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" event={"ID":"3563f548-9b84-485d-992f-c2f4998bf348","Type":"ContainerStarted","Data":"3f1a3fcd079536bea05383bfb030108340c43df94cde31c23b18755afbb5b54d"} Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.849970 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.864861 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" podStartSLOduration=3.864842461 podStartE2EDuration="3.864842461s" podCreationTimestamp="2026-01-28 12:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:57.864263964 +0000 UTC m=+344.776481002" watchObservedRunningTime="2026-01-28 12:51:57.864842461 +0000 UTC m=+344.777059489" Jan 28 12:51:57 crc kubenswrapper[4848]: I0128 12:51:57.885672 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" podStartSLOduration=3.88560859 podStartE2EDuration="3.88560859s" podCreationTimestamp="2026-01-28 12:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:57.884231312 +0000 UTC m=+344.796448350" watchObservedRunningTime="2026-01-28 12:51:57.88560859 +0000 UTC m=+344.797825628" Jan 28 12:51:58 crc kubenswrapper[4848]: I0128 12:51:58.861281 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:51:58 crc kubenswrapper[4848]: I0128 12:51:58.861895 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:52:37 crc kubenswrapper[4848]: I0128 12:52:37.924965 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:52:37 crc kubenswrapper[4848]: I0128 12:52:37.925924 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.358111 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tqpqm"] Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.360875 4848 util.go:30] "No sandbox for pod can be found. 
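
Note: this stretch shows the normal readiness flow for the new pods (probe status "" then "ready", followed by pod_startup_latency_tracker recording the end-to-end startup duration), and then a failed liveness probe against machine-config-daemon where the TCP connect itself is refused. Kubelet HTTP probes count any response status in the 2xx/3xx range as success; a stripped-down equivalent, reusing the endpoint from the log, looks like this.

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func probe(url string) error {
    	c := &http.Client{Timeout: time.Second}
    	resp, err := c.Get(url)
    	if err != nil {
    		return err // e.g. "connect: connection refused", as in the log
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		return fmt.Errorf("unhealthy status: %s", resp.Status)
    	}
    	return nil
    }

    func main() {
    	if err := probe("http://127.0.0.1:8798/health"); err != nil {
    		fmt.Println("Probe failed:", err)
    	}
    }
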
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.378417 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tqpqm"] Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.512409 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-trusted-ca\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.512487 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.512508 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-registry-certificates\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.512536 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv82m\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-kube-api-access-fv82m\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.512830 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-bound-sa-token\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.512960 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.513058 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.513143 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-registry-tls\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.544468 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.614813 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv82m\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-kube-api-access-fv82m\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.614918 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-bound-sa-token\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.614955 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.614986 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-registry-tls\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.615019 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-trusted-ca\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.615062 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.615098 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-registry-certificates\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.617241 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-registry-certificates\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.617633 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.617751 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-trusted-ca\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.623162 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-registry-tls\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.625125 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.635236 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fv82m\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-kube-api-access-fv82m\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.635419 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/15e7f2e2-c629-4a8b-94a0-7f7d02497e4d-bound-sa-token\") pod \"image-registry-66df7c8f76-tqpqm\" (UID: \"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d\") " pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:45 crc kubenswrapper[4848]: I0128 12:52:45.687165 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:46 crc kubenswrapper[4848]: I0128 12:52:46.174076 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tqpqm"] Jan 28 12:52:47 crc kubenswrapper[4848]: I0128 12:52:47.152932 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" event={"ID":"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d","Type":"ContainerStarted","Data":"5a3437704ca40cbf21bce8b98b3fa8222d67576d0348de54be69491d295190d1"} Jan 28 12:52:47 crc kubenswrapper[4848]: I0128 12:52:47.154425 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" event={"ID":"15e7f2e2-c629-4a8b-94a0-7f7d02497e4d","Type":"ContainerStarted","Data":"61272462f0a44680a124f99f1723c2fe39ddf53c12b5e29ff32ed91c5656b36a"} Jan 28 12:52:47 crc kubenswrapper[4848]: I0128 12:52:47.155345 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:52:47 crc kubenswrapper[4848]: I0128 12:52:47.177084 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" podStartSLOduration=2.177061513 podStartE2EDuration="2.177061513s" podCreationTimestamp="2026-01-28 12:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:47.17586367 +0000 UTC m=+394.088080708" watchObservedRunningTime="2026-01-28 12:52:47.177061513 +0000 UTC m=+394.089278551" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.315623 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z884b"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.316590 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z884b" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="registry-server" containerID="cri-o://20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" gracePeriod=30 Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.347608 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d8bpv"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.349197 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d8bpv" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="registry-server" containerID="cri-o://6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" gracePeriod=30 Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.386042 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlgtd"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.390213 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7gdr"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.390704 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b7gdr" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="registry-server" containerID="cri-o://07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64" gracePeriod=30 Jan 28 12:52:52 crc 
kubenswrapper[4848]: I0128 12:52:52.390909 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" containerID="cri-o://8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad" gracePeriod=30 Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.402790 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6gkd"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.403455 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c6gkd" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="registry-server" containerID="cri-o://fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b" gracePeriod=30 Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.409500 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g5r8p"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.411638 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.415083 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g5r8p"] Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.485696 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/69959509-efcd-4928-98ad-1dcd656b5513-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.485760 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9sck\" (UniqueName: \"kubernetes.io/projected/69959509-efcd-4928-98ad-1dcd656b5513-kube-api-access-q9sck\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.485906 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/69959509-efcd-4928-98ad-1dcd656b5513-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.587843 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/69959509-efcd-4928-98ad-1dcd656b5513-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.587935 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/69959509-efcd-4928-98ad-1dcd656b5513-marketplace-trusted-ca\") 
pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.587972 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9sck\" (UniqueName: \"kubernetes.io/projected/69959509-efcd-4928-98ad-1dcd656b5513-kube-api-access-q9sck\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.591824 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/69959509-efcd-4928-98ad-1dcd656b5513-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.597942 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/69959509-efcd-4928-98ad-1dcd656b5513-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.613189 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9sck\" (UniqueName: \"kubernetes.io/projected/69959509-efcd-4928-98ad-1dcd656b5513-kube-api-access-q9sck\") pod \"marketplace-operator-79b997595-g5r8p\" (UID: \"69959509-efcd-4928-98ad-1dcd656b5513\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.710770 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee is running failed: container process not found" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.711336 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee is running failed: container process not found" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.711763 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee is running failed: container process not found" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.711807 4848 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee is running failed: container process not found" probeType="Readiness" 
pod="openshift-marketplace/certified-operators-z884b" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="registry-server" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.787439 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.875485 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z884b" Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.934529 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c is running failed: container process not found" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.939758 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c is running failed: container process not found" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.940543 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c is running failed: container process not found" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:52:52 crc kubenswrapper[4848]: E0128 12:52:52.940582 4848 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-d8bpv" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="registry-server" Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.993728 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-utilities\") pod \"af50828a-cf61-481c-98c3-fb3e7d8de01a\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.993808 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-catalog-content\") pod \"af50828a-cf61-481c-98c3-fb3e7d8de01a\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.993866 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqvdc\" (UniqueName: \"kubernetes.io/projected/af50828a-cf61-481c-98c3-fb3e7d8de01a-kube-api-access-tqvdc\") pod \"af50828a-cf61-481c-98c3-fb3e7d8de01a\" (UID: \"af50828a-cf61-481c-98c3-fb3e7d8de01a\") " Jan 28 12:52:52 crc kubenswrapper[4848]: I0128 12:52:52.994643 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-utilities" (OuterVolumeSpecName: "utilities") pod "af50828a-cf61-481c-98c3-fb3e7d8de01a" (UID: "af50828a-cf61-481c-98c3-fb3e7d8de01a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.010372 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af50828a-cf61-481c-98c3-fb3e7d8de01a-kube-api-access-tqvdc" (OuterVolumeSpecName: "kube-api-access-tqvdc") pod "af50828a-cf61-481c-98c3-fb3e7d8de01a" (UID: "af50828a-cf61-481c-98c3-fb3e7d8de01a"). InnerVolumeSpecName "kube-api-access-tqvdc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.071949 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af50828a-cf61-481c-98c3-fb3e7d8de01a" (UID: "af50828a-cf61-481c-98c3-fb3e7d8de01a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.095048 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqvdc\" (UniqueName: \"kubernetes.io/projected/af50828a-cf61-481c-98c3-fb3e7d8de01a-kube-api-access-tqvdc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.095116 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.095132 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af50828a-cf61-481c-98c3-fb3e7d8de01a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.099147 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d8bpv" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.105441 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.111915 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.130823 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.199152 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-catalog-content\") pod \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.199222 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-utilities\") pod \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.199337 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhj5x\" (UniqueName: \"kubernetes.io/projected/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-kube-api-access-dhj5x\") pod \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\" (UID: \"8e357b5a-bdd4-4681-a70d-afaf1275f5e4\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.199430 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-catalog-content\") pod \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.199506 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-utilities\") pod \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.199565 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x775h\" (UniqueName: \"kubernetes.io/projected/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-kube-api-access-x775h\") pod \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\" (UID: \"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.207058 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-utilities" (OuterVolumeSpecName: "utilities") pod "8e357b5a-bdd4-4681-a70d-afaf1275f5e4" (UID: "8e357b5a-bdd4-4681-a70d-afaf1275f5e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.209990 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-utilities" (OuterVolumeSpecName: "utilities") pod "4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" (UID: "4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.210193 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-kube-api-access-x775h" (OuterVolumeSpecName: "kube-api-access-x775h") pod "4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" (UID: "4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99"). InnerVolumeSpecName "kube-api-access-x775h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.213557 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-kube-api-access-dhj5x" (OuterVolumeSpecName: "kube-api-access-dhj5x") pod "8e357b5a-bdd4-4681-a70d-afaf1275f5e4" (UID: "8e357b5a-bdd4-4681-a70d-afaf1275f5e4"). InnerVolumeSpecName "kube-api-access-dhj5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.224885 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8bpv" event={"ID":"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99","Type":"ContainerDied","Data":"6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.224937 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d8bpv" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.224957 4848 scope.go:117] "RemoveContainer" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.224833 4848 generic.go:334] "Generic (PLEG): container finished" podID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" exitCode=0 Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.225371 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8bpv" event={"ID":"4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99","Type":"ContainerDied","Data":"d78995d884c581c197f1a05b71309b47fae97ce572e5ba39862876ffe979deb4"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.228650 4848 generic.go:334] "Generic (PLEG): container finished" podID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerID="fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b" exitCode=0 Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.228855 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerDied","Data":"fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.229021 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6gkd" event={"ID":"26dc23f3-cba2-4cb1-9cf6-7402896c876d","Type":"ContainerDied","Data":"a8860644a5202775c4a01ab8f5fd3e3c76084a452ca561d9738292f27b18b610"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.229236 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c6gkd" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.234206 4848 generic.go:334] "Generic (PLEG): container finished" podID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" exitCode=0 Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.234465 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerDied","Data":"20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.234577 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z884b" event={"ID":"af50828a-cf61-481c-98c3-fb3e7d8de01a","Type":"ContainerDied","Data":"c85cbe970a5c1d90038ccb01267308e154eaa8ed7ac719a85794caf7f4f99ee8"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.234583 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z884b" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.237346 4848 generic.go:334] "Generic (PLEG): container finished" podID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerID="07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64" exitCode=0 Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.237424 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerDied","Data":"07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.237448 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7gdr" event={"ID":"8e357b5a-bdd4-4681-a70d-afaf1275f5e4","Type":"ContainerDied","Data":"2ba5cc61fe4efbb1fa0e66571e77e9c4d39fbc64e9b080f096dc922c94eea77c"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.237545 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7gdr" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.247296 4848 generic.go:334] "Generic (PLEG): container finished" podID="0d553491-aa2a-495d-b02c-73a52d29278b" containerID="8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad" exitCode=0 Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.247377 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" event={"ID":"0d553491-aa2a-495d-b02c-73a52d29278b","Type":"ContainerDied","Data":"8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.247417 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" event={"ID":"0d553491-aa2a-495d-b02c-73a52d29278b","Type":"ContainerDied","Data":"196ce14f09e1d427f4980821118e59920e1fb4f94595405d10a7c246fbb72d8e"} Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.247492 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlgtd" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.250040 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e357b5a-bdd4-4681-a70d-afaf1275f5e4" (UID: "8e357b5a-bdd4-4681-a70d-afaf1275f5e4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.279739 4848 scope.go:117] "RemoveContainer" containerID="37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.282419 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z884b"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.288078 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z884b"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.294842 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" (UID: "4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.300929 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-trusted-ca\") pod \"0d553491-aa2a-495d-b02c-73a52d29278b\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301060 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fxm2\" (UniqueName: \"kubernetes.io/projected/0d553491-aa2a-495d-b02c-73a52d29278b-kube-api-access-4fxm2\") pod \"0d553491-aa2a-495d-b02c-73a52d29278b\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301099 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-utilities\") pod \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301165 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdnrr\" (UniqueName: \"kubernetes.io/projected/26dc23f3-cba2-4cb1-9cf6-7402896c876d-kube-api-access-kdnrr\") pod \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301205 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-operator-metrics\") pod \"0d553491-aa2a-495d-b02c-73a52d29278b\" (UID: \"0d553491-aa2a-495d-b02c-73a52d29278b\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301240 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-catalog-content\") pod \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\" (UID: \"26dc23f3-cba2-4cb1-9cf6-7402896c876d\") " Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301536 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301557 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301571 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhj5x\" (UniqueName: \"kubernetes.io/projected/8e357b5a-bdd4-4681-a70d-afaf1275f5e4-kube-api-access-dhj5x\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301654 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301665 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.301679 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x775h\" (UniqueName: \"kubernetes.io/projected/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99-kube-api-access-x775h\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.302330 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-utilities" (OuterVolumeSpecName: "utilities") pod "26dc23f3-cba2-4cb1-9cf6-7402896c876d" (UID: "26dc23f3-cba2-4cb1-9cf6-7402896c876d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.302648 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "0d553491-aa2a-495d-b02c-73a52d29278b" (UID: "0d553491-aa2a-495d-b02c-73a52d29278b"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.305504 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26dc23f3-cba2-4cb1-9cf6-7402896c876d-kube-api-access-kdnrr" (OuterVolumeSpecName: "kube-api-access-kdnrr") pod "26dc23f3-cba2-4cb1-9cf6-7402896c876d" (UID: "26dc23f3-cba2-4cb1-9cf6-7402896c876d"). InnerVolumeSpecName "kube-api-access-kdnrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.305706 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d553491-aa2a-495d-b02c-73a52d29278b-kube-api-access-4fxm2" (OuterVolumeSpecName: "kube-api-access-4fxm2") pod "0d553491-aa2a-495d-b02c-73a52d29278b" (UID: "0d553491-aa2a-495d-b02c-73a52d29278b"). 
InnerVolumeSpecName "kube-api-access-4fxm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.306608 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "0d553491-aa2a-495d-b02c-73a52d29278b" (UID: "0d553491-aa2a-495d-b02c-73a52d29278b"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.309297 4848 scope.go:117] "RemoveContainer" containerID="02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.331438 4848 scope.go:117] "RemoveContainer" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.332266 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c\": container with ID starting with 6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c not found: ID does not exist" containerID="6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.332331 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c"} err="failed to get container status \"6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c\": rpc error: code = NotFound desc = could not find container \"6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c\": container with ID starting with 6fe768854a2814f7cad386dd4e17759df6bd3a54456c19e4c715aa7c5427dd8c not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.332372 4848 scope.go:117] "RemoveContainer" containerID="37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.332781 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5\": container with ID starting with 37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5 not found: ID does not exist" containerID="37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.332822 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5"} err="failed to get container status \"37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5\": rpc error: code = NotFound desc = could not find container \"37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5\": container with ID starting with 37480da7ed5392c06e3f7b83e43ffd8e8201960514619e50e9602904258b9fd5 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.332859 4848 scope.go:117] "RemoveContainer" containerID="02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.333270 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed\": container with ID starting with 02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed not found: ID does not exist" containerID="02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.333341 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed"} err="failed to get container status \"02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed\": rpc error: code = NotFound desc = could not find container \"02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed\": container with ID starting with 02812e66dd7ed15e7cfd7d9827289fa241cc73ab13c482f79e2ed4b7b28897ed not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.333408 4848 scope.go:117] "RemoveContainer" containerID="fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.347282 4848 scope.go:117] "RemoveContainer" containerID="2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.362439 4848 scope.go:117] "RemoveContainer" containerID="0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.380190 4848 scope.go:117] "RemoveContainer" containerID="fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.380714 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b\": container with ID starting with fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b not found: ID does not exist" containerID="fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.380778 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b"} err="failed to get container status \"fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b\": rpc error: code = NotFound desc = could not find container \"fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b\": container with ID starting with fc7f1dc3d94131a6c7902dbd6c391193dfde7d87a4136740a678cb21c8abd17b not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.380818 4848 scope.go:117] "RemoveContainer" containerID="2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.381329 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4\": container with ID starting with 2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4 not found: ID does not exist" containerID="2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.381362 4848 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4"} err="failed to get container status \"2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4\": rpc error: code = NotFound desc = could not find container \"2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4\": container with ID starting with 2b8c7f9d967757922eb730308bab14f0bb9fa9a81081a9b30914be0ac74b80f4 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.381426 4848 scope.go:117] "RemoveContainer" containerID="0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.382091 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f\": container with ID starting with 0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f not found: ID does not exist" containerID="0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.382179 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f"} err="failed to get container status \"0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f\": rpc error: code = NotFound desc = could not find container \"0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f\": container with ID starting with 0e2eeece3d6b166d2a38446ba770d33b6e2e3aab12980abf7e29682d61fc901f not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.382232 4848 scope.go:117] "RemoveContainer" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.400981 4848 scope.go:117] "RemoveContainer" containerID="c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.407448 4848 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.407522 4848 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0d553491-aa2a-495d-b02c-73a52d29278b-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.407540 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fxm2\" (UniqueName: \"kubernetes.io/projected/0d553491-aa2a-495d-b02c-73a52d29278b-kube-api-access-4fxm2\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.407553 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.407565 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdnrr\" (UniqueName: \"kubernetes.io/projected/26dc23f3-cba2-4cb1-9cf6-7402896c876d-kube-api-access-kdnrr\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.413496 4848 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g5r8p"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.429369 4848 scope.go:117] "RemoveContainer" containerID="934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.444204 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26dc23f3-cba2-4cb1-9cf6-7402896c876d" (UID: "26dc23f3-cba2-4cb1-9cf6-7402896c876d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.449718 4848 scope.go:117] "RemoveContainer" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.450062 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee\": container with ID starting with 20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee not found: ID does not exist" containerID="20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.450102 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee"} err="failed to get container status \"20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee\": rpc error: code = NotFound desc = could not find container \"20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee\": container with ID starting with 20b86c72c3cada3b733a4ba076f0bde77e4de611dbc58f3a5ad33303bea5d1ee not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.450127 4848 scope.go:117] "RemoveContainer" containerID="c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.450489 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e\": container with ID starting with c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e not found: ID does not exist" containerID="c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.450515 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e"} err="failed to get container status \"c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e\": rpc error: code = NotFound desc = could not find container \"c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e\": container with ID starting with c766decd8241ef999abae501bff5dd82bd25ac3ac168647e8abf334deeccf24e not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.450534 4848 scope.go:117] "RemoveContainer" containerID="934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.450858 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7\": container with ID starting with 934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7 not found: ID does not exist" containerID="934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.450891 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7"} err="failed to get container status \"934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7\": rpc error: code = NotFound desc = could not find container \"934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7\": container with ID starting with 934961adeaf6507833054fb8e0f2b210644c2f0b3c685c3001969e06362698c7 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.450914 4848 scope.go:117] "RemoveContainer" containerID="07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.469609 4848 scope.go:117] "RemoveContainer" containerID="9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.485311 4848 scope.go:117] "RemoveContainer" containerID="44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.502154 4848 scope.go:117] "RemoveContainer" containerID="07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.502742 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64\": container with ID starting with 07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64 not found: ID does not exist" containerID="07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.502774 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64"} err="failed to get container status \"07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64\": rpc error: code = NotFound desc = could not find container \"07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64\": container with ID starting with 07468492c371bf43abc868c743246d0083f38f4aa4d5a9d713bd05046b35ab64 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.502797 4848 scope.go:117] "RemoveContainer" containerID="9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.503000 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16\": container with ID starting with 9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16 not found: ID does not exist" containerID="9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.503025 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16"} err="failed to get container status 
\"9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16\": rpc error: code = NotFound desc = could not find container \"9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16\": container with ID starting with 9f026a35f28b575035928e7513aad6eebe553cb025fb72d2588cee3337fcae16 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.503040 4848 scope.go:117] "RemoveContainer" containerID="44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.503326 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3\": container with ID starting with 44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3 not found: ID does not exist" containerID="44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.503379 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3"} err="failed to get container status \"44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3\": rpc error: code = NotFound desc = could not find container \"44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3\": container with ID starting with 44eda17db886e18fc5bbdaf16b0c8801b0ccdfa0a4a73f494797b344fb4d4db3 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.503416 4848 scope.go:117] "RemoveContainer" containerID="8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.508830 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26dc23f3-cba2-4cb1-9cf6-7402896c876d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.517059 4848 scope.go:117] "RemoveContainer" containerID="11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.607185 4848 scope.go:117] "RemoveContainer" containerID="8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.607826 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad\": container with ID starting with 8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad not found: ID does not exist" containerID="8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.608008 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad"} err="failed to get container status \"8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad\": rpc error: code = NotFound desc = could not find container \"8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad\": container with ID starting with 8edec8abdd29b18f28f59425334d5dd3dc2481387352703c0465cb9cbb0ae2ad not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.609949 4848 scope.go:117] "RemoveContainer" 
containerID="11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0" Jan 28 12:52:53 crc kubenswrapper[4848]: E0128 12:52:53.610693 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0\": container with ID starting with 11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0 not found: ID does not exist" containerID="11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.610726 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0"} err="failed to get container status \"11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0\": rpc error: code = NotFound desc = could not find container \"11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0\": container with ID starting with 11af5cdb1797c4faa9b577556fa1b362042ee183719299da28840e29d01d33d0 not found: ID does not exist" Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.641000 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlgtd"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.647346 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlgtd"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.658700 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6gkd"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.668318 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c6gkd"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.673817 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7gdr"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.680260 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7gdr"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.709940 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d8bpv"] Jan 28 12:52:53 crc kubenswrapper[4848]: I0128 12:52:53.724797 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d8bpv"] Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.261546 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" event={"ID":"69959509-efcd-4928-98ad-1dcd656b5513","Type":"ContainerStarted","Data":"a2b537a16b364bf95a0d3d12dc16c9c1ed199539f21fc85e443f0e3708ab265c"} Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.261618 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" event={"ID":"69959509-efcd-4928-98ad-1dcd656b5513","Type":"ContainerStarted","Data":"dc46794888bc9754a1087bb9a0b175a97e33482f12f0e99000e6485feadcc9c1"} Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.263494 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.288188 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" podStartSLOduration=2.288159003 podStartE2EDuration="2.288159003s" podCreationTimestamp="2026-01-28 12:52:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:54.284784878 +0000 UTC m=+401.197001916" watchObservedRunningTime="2026-01-28 12:52:54.288159003 +0000 UTC m=+401.200376051" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.293437 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-g5r8p" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.425770 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl"] Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.426040 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" podUID="3563f548-9b84-485d-992f-c2f4998bf348" containerName="controller-manager" containerID="cri-o://fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20" gracePeriod=30 Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.447530 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk"] Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.447834 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" podUID="29bee547-2bd7-4b10-852c-3178a224dea0" containerName="route-controller-manager" containerID="cri-o://114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230" gracePeriod=30 Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.522865 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9pjsb"] Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523134 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523150 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523162 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523169 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523184 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523192 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523207 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523214 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523226 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523233 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523240 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523268 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523283 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523290 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523302 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523309 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523320 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523328 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523338 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523346 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="extract-utilities" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523357 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523365 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="extract-content" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523374 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523381 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523390 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523398 4848 
state_mem.go:107] "Deleted CPUSet assignment" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: E0128 12:52:54.523406 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523414 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523524 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523540 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523552 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523559 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523572 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" containerName="registry-server" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.523745 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" containerName="marketplace-operator" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.524396 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.526624 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.541121 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pjsb"] Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.622444 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1816581b-af94-4067-9cd0-23c9e204bd4c-utilities\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.622913 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ktb2\" (UniqueName: \"kubernetes.io/projected/1816581b-af94-4067-9cd0-23c9e204bd4c-kube-api-access-5ktb2\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.622972 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1816581b-af94-4067-9cd0-23c9e204bd4c-catalog-content\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.714860 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sf2v5"] Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.715873 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.720212 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.723637 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ktb2\" (UniqueName: \"kubernetes.io/projected/1816581b-af94-4067-9cd0-23c9e204bd4c-kube-api-access-5ktb2\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.723691 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1816581b-af94-4067-9cd0-23c9e204bd4c-catalog-content\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.723731 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1816581b-af94-4067-9cd0-23c9e204bd4c-utilities\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.724287 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1816581b-af94-4067-9cd0-23c9e204bd4c-utilities\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.724722 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1816581b-af94-4067-9cd0-23c9e204bd4c-catalog-content\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.727514 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sf2v5"] Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.753119 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ktb2\" (UniqueName: \"kubernetes.io/projected/1816581b-af94-4067-9cd0-23c9e204bd4c-kube-api-access-5ktb2\") pod \"redhat-marketplace-9pjsb\" (UID: \"1816581b-af94-4067-9cd0-23c9e204bd4c\") " pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.824671 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-utilities\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.824739 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-catalog-content\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") 
" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.824784 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm9lj\" (UniqueName: \"kubernetes.io/projected/02b30305-56c4-45c3-aae4-de194e8caa56-kube-api-access-tm9lj\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.860296 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d553491-aa2a-495d-b02c-73a52d29278b" path="/var/lib/kubelet/pods/0d553491-aa2a-495d-b02c-73a52d29278b/volumes" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.860881 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26dc23f3-cba2-4cb1-9cf6-7402896c876d" path="/var/lib/kubelet/pods/26dc23f3-cba2-4cb1-9cf6-7402896c876d/volumes" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.861568 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99" path="/var/lib/kubelet/pods/4694bd45-c9d2-4b53-ac2f-bc5e9e74dc99/volumes" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.862780 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e357b5a-bdd4-4681-a70d-afaf1275f5e4" path="/var/lib/kubelet/pods/8e357b5a-bdd4-4681-a70d-afaf1275f5e4/volumes" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.863406 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af50828a-cf61-481c-98c3-fb3e7d8de01a" path="/var/lib/kubelet/pods/af50828a-cf61-481c-98c3-fb3e7d8de01a/volumes" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.912206 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.918768 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.928199 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-utilities\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.928291 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-catalog-content\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.928363 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm9lj\" (UniqueName: \"kubernetes.io/projected/02b30305-56c4-45c3-aae4-de194e8caa56-kube-api-access-tm9lj\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.929480 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-utilities\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.929510 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-catalog-content\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.932185 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:52:54 crc kubenswrapper[4848]: I0128 12:52:54.949754 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm9lj\" (UniqueName: \"kubernetes.io/projected/02b30305-56c4-45c3-aae4-de194e8caa56-kube-api-access-tm9lj\") pod \"certified-operators-sf2v5\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.029512 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-client-ca\") pod \"29bee547-2bd7-4b10-852c-3178a224dea0\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030069 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6658\" (UniqueName: \"kubernetes.io/projected/3563f548-9b84-485d-992f-c2f4998bf348-kube-api-access-c6658\") pod \"3563f548-9b84-485d-992f-c2f4998bf348\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030127 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-config\") pod \"29bee547-2bd7-4b10-852c-3178a224dea0\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030159 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3563f548-9b84-485d-992f-c2f4998bf348-serving-cert\") pod \"3563f548-9b84-485d-992f-c2f4998bf348\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030186 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-config\") pod \"3563f548-9b84-485d-992f-c2f4998bf348\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030210 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29bee547-2bd7-4b10-852c-3178a224dea0-serving-cert\") pod \"29bee547-2bd7-4b10-852c-3178a224dea0\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030241 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-proxy-ca-bundles\") pod \"3563f548-9b84-485d-992f-c2f4998bf348\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030379 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rv5b\" (UniqueName: \"kubernetes.io/projected/29bee547-2bd7-4b10-852c-3178a224dea0-kube-api-access-4rv5b\") pod \"29bee547-2bd7-4b10-852c-3178a224dea0\" (UID: \"29bee547-2bd7-4b10-852c-3178a224dea0\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030455 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-client-ca\") pod \"3563f548-9b84-485d-992f-c2f4998bf348\" (UID: \"3563f548-9b84-485d-992f-c2f4998bf348\") " Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.030602 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.031222 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-client-ca" (OuterVolumeSpecName: "client-ca") pod "29bee547-2bd7-4b10-852c-3178a224dea0" (UID: "29bee547-2bd7-4b10-852c-3178a224dea0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.031329 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-config" (OuterVolumeSpecName: "config") pod "29bee547-2bd7-4b10-852c-3178a224dea0" (UID: "29bee547-2bd7-4b10-852c-3178a224dea0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.031802 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-client-ca" (OuterVolumeSpecName: "client-ca") pod "3563f548-9b84-485d-992f-c2f4998bf348" (UID: "3563f548-9b84-485d-992f-c2f4998bf348"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.031872 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-config" (OuterVolumeSpecName: "config") pod "3563f548-9b84-485d-992f-c2f4998bf348" (UID: "3563f548-9b84-485d-992f-c2f4998bf348"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.031917 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3563f548-9b84-485d-992f-c2f4998bf348" (UID: "3563f548-9b84-485d-992f-c2f4998bf348"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.039089 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3563f548-9b84-485d-992f-c2f4998bf348-kube-api-access-c6658" (OuterVolumeSpecName: "kube-api-access-c6658") pod "3563f548-9b84-485d-992f-c2f4998bf348" (UID: "3563f548-9b84-485d-992f-c2f4998bf348"). InnerVolumeSpecName "kube-api-access-c6658". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.039097 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3563f548-9b84-485d-992f-c2f4998bf348-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3563f548-9b84-485d-992f-c2f4998bf348" (UID: "3563f548-9b84-485d-992f-c2f4998bf348"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.039126 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29bee547-2bd7-4b10-852c-3178a224dea0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "29bee547-2bd7-4b10-852c-3178a224dea0" (UID: "29bee547-2bd7-4b10-852c-3178a224dea0"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.039144 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29bee547-2bd7-4b10-852c-3178a224dea0-kube-api-access-4rv5b" (OuterVolumeSpecName: "kube-api-access-4rv5b") pod "29bee547-2bd7-4b10-852c-3178a224dea0" (UID: "29bee547-2bd7-4b10-852c-3178a224dea0"). InnerVolumeSpecName "kube-api-access-4rv5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131873 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131911 4848 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131925 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6658\" (UniqueName: \"kubernetes.io/projected/3563f548-9b84-485d-992f-c2f4998bf348-kube-api-access-c6658\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131940 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29bee547-2bd7-4b10-852c-3178a224dea0-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131953 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3563f548-9b84-485d-992f-c2f4998bf348-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131965 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131976 4848 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29bee547-2bd7-4b10-852c-3178a224dea0-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.131988 4848 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3563f548-9b84-485d-992f-c2f4998bf348-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.132002 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rv5b\" (UniqueName: \"kubernetes.io/projected/29bee547-2bd7-4b10-852c-3178a224dea0-kube-api-access-4rv5b\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.188656 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pjsb"] Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 
12:52:55.278993 4848 generic.go:334] "Generic (PLEG): container finished" podID="29bee547-2bd7-4b10-852c-3178a224dea0" containerID="114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230" exitCode=0 Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.279168 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.279322 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" event={"ID":"29bee547-2bd7-4b10-852c-3178a224dea0","Type":"ContainerDied","Data":"114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230"} Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.279383 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk" event={"ID":"29bee547-2bd7-4b10-852c-3178a224dea0","Type":"ContainerDied","Data":"ca7464dbcf6f4d95875de5ae647fe5f9b9abd1cda4b98b8b84ba5117196e0c3c"} Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.279419 4848 scope.go:117] "RemoveContainer" containerID="114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.286862 4848 generic.go:334] "Generic (PLEG): container finished" podID="3563f548-9b84-485d-992f-c2f4998bf348" containerID="fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20" exitCode=0 Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.286949 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" event={"ID":"3563f548-9b84-485d-992f-c2f4998bf348","Type":"ContainerDied","Data":"fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20"} Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.286980 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" event={"ID":"3563f548-9b84-485d-992f-c2f4998bf348","Type":"ContainerDied","Data":"3f1a3fcd079536bea05383bfb030108340c43df94cde31c23b18755afbb5b54d"} Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.287052 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.288466 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pjsb" event={"ID":"1816581b-af94-4067-9cd0-23c9e204bd4c","Type":"ContainerStarted","Data":"ed07de3a0700ac11b4022f9ffcdb5b993ddf2237d56d63457cf1af312b66fd09"} Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.309400 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sf2v5"] Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.320572 4848 scope.go:117] "RemoveContainer" containerID="114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230" Jan 28 12:52:55 crc kubenswrapper[4848]: E0128 12:52:55.326784 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230\": container with ID starting with 114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230 not found: ID does not exist" containerID="114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.327198 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230"} err="failed to get container status \"114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230\": rpc error: code = NotFound desc = could not find container \"114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230\": container with ID starting with 114479945f59c29fd4c895e70db03afe3f3c7d09c7a11855e7ea2074dd728230 not found: ID does not exist" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.327234 4848 scope.go:117] "RemoveContainer" containerID="fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.354291 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl"] Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.363700 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5f4c649f4c-9qbpl"] Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.369827 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk"] Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.372622 4848 scope.go:117] "RemoveContainer" containerID="fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20" Jan 28 12:52:55 crc kubenswrapper[4848]: E0128 12:52:55.373287 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20\": container with ID starting with fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20 not found: ID does not exist" containerID="fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.373324 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20"} err="failed to get container status 
\"fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20\": rpc error: code = NotFound desc = could not find container \"fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20\": container with ID starting with fa52f28417bdeed03f7b660cf1821a5d595d8a9aaddb8f205af149e59ee2cc20 not found: ID does not exist" Jan 28 12:52:55 crc kubenswrapper[4848]: I0128 12:52:55.374609 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d4b786c5-rl5jk"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.296171 4848 generic.go:334] "Generic (PLEG): container finished" podID="02b30305-56c4-45c3-aae4-de194e8caa56" containerID="03aca973d0f0eb1cd3e583b932d42bb95ba5a0ac11106d0a613ec37bd533d35e" exitCode=0 Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.296292 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf2v5" event={"ID":"02b30305-56c4-45c3-aae4-de194e8caa56","Type":"ContainerDied","Data":"03aca973d0f0eb1cd3e583b932d42bb95ba5a0ac11106d0a613ec37bd533d35e"} Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.296360 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf2v5" event={"ID":"02b30305-56c4-45c3-aae4-de194e8caa56","Type":"ContainerStarted","Data":"aac2eb693b964ff2593038ce498308d76096e593c15e01546e30ac515b444986"} Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.303801 4848 generic.go:334] "Generic (PLEG): container finished" podID="1816581b-af94-4067-9cd0-23c9e204bd4c" containerID="bad258cffbc33ef509970b8087459e95112cf70aeeeb01d45ea4986df5daf77c" exitCode=0 Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.305029 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pjsb" event={"ID":"1816581b-af94-4067-9cd0-23c9e204bd4c","Type":"ContainerDied","Data":"bad258cffbc33ef509970b8087459e95112cf70aeeeb01d45ea4986df5daf77c"} Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.328457 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl"] Jan 28 12:52:56 crc kubenswrapper[4848]: E0128 12:52:56.328866 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3563f548-9b84-485d-992f-c2f4998bf348" containerName="controller-manager" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.328888 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="3563f548-9b84-485d-992f-c2f4998bf348" containerName="controller-manager" Jan 28 12:52:56 crc kubenswrapper[4848]: E0128 12:52:56.328924 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29bee547-2bd7-4b10-852c-3178a224dea0" containerName="route-controller-manager" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.328934 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="29bee547-2bd7-4b10-852c-3178a224dea0" containerName="route-controller-manager" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.329078 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="3563f548-9b84-485d-992f-c2f4998bf348" containerName="controller-manager" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.329106 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="29bee547-2bd7-4b10-852c-3178a224dea0" containerName="route-controller-manager" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.329876 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.332364 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.332667 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.332852 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.332902 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.333066 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.332856 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.333290 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-m7gkk"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.334401 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.338047 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.338198 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.338066 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.338348 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.338379 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.338734 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.345598 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.349834 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.359153 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-m7gkk"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384087 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9wcp\" (UniqueName: 
\"kubernetes.io/projected/c4f4be3c-5836-4567-b030-2720240933e8-kube-api-access-g9wcp\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384358 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-config\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384417 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c4f4be3c-5836-4567-b030-2720240933e8-client-ca\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384482 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-client-ca\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384518 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-proxy-ca-bundles\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384551 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f4be3c-5836-4567-b030-2720240933e8-serving-cert\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384572 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea482a02-f2ba-4bfd-bb1b-963082545a69-serving-cert\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384627 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4f4be3c-5836-4567-b030-2720240933e8-config\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.384713 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2nvr\" (UniqueName: 
\"kubernetes.io/projected/ea482a02-f2ba-4bfd-bb1b-963082545a69-kube-api-access-v2nvr\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.485840 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9wcp\" (UniqueName: \"kubernetes.io/projected/c4f4be3c-5836-4567-b030-2720240933e8-kube-api-access-g9wcp\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.485897 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-config\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.485932 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c4f4be3c-5836-4567-b030-2720240933e8-client-ca\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.485955 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-client-ca\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.485977 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-proxy-ca-bundles\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.485998 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f4be3c-5836-4567-b030-2720240933e8-serving-cert\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.486013 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea482a02-f2ba-4bfd-bb1b-963082545a69-serving-cert\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.486044 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4f4be3c-5836-4567-b030-2720240933e8-config\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: 
\"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.486079 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2nvr\" (UniqueName: \"kubernetes.io/projected/ea482a02-f2ba-4bfd-bb1b-963082545a69-kube-api-access-v2nvr\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.487337 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-config\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.487777 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-proxy-ca-bundles\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.488898 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4f4be3c-5836-4567-b030-2720240933e8-config\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.490462 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c4f4be3c-5836-4567-b030-2720240933e8-client-ca\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.491793 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ea482a02-f2ba-4bfd-bb1b-963082545a69-client-ca\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.492912 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f4be3c-5836-4567-b030-2720240933e8-serving-cert\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.493289 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea482a02-f2ba-4bfd-bb1b-963082545a69-serving-cert\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.504722 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v2nvr\" (UniqueName: \"kubernetes.io/projected/ea482a02-f2ba-4bfd-bb1b-963082545a69-kube-api-access-v2nvr\") pod \"controller-manager-8697fb7788-m7gkk\" (UID: \"ea482a02-f2ba-4bfd-bb1b-963082545a69\") " pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.505771 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9wcp\" (UniqueName: \"kubernetes.io/projected/c4f4be3c-5836-4567-b030-2720240933e8-kube-api-access-g9wcp\") pod \"route-controller-manager-74d95f9b48-89ngl\" (UID: \"c4f4be3c-5836-4567-b030-2720240933e8\") " pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.699434 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.712975 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.864979 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29bee547-2bd7-4b10-852c-3178a224dea0" path="/var/lib/kubelet/pods/29bee547-2bd7-4b10-852c-3178a224dea0/volumes" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.866053 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3563f548-9b84-485d-992f-c2f4998bf348" path="/var/lib/kubelet/pods/3563f548-9b84-485d-992f-c2f4998bf348/volumes" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.929758 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.942150 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jzvm9"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.950290 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.953928 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.957703 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jzvm9"] Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.995352 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-utilities\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.995424 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khvc5\" (UniqueName: \"kubernetes.io/projected/658ce371-1c32-4cb6-ab5c-9f67ed85353b-kube-api-access-khvc5\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:56 crc kubenswrapper[4848]: I0128 12:52:56.995470 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-catalog-content\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.098319 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-catalog-content\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.098426 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-utilities\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.098465 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khvc5\" (UniqueName: \"kubernetes.io/projected/658ce371-1c32-4cb6-ab5c-9f67ed85353b-kube-api-access-khvc5\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.099907 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-utilities\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.099994 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-catalog-content\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " 
pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.120267 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fznvv"] Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.121796 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.126163 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.130141 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khvc5\" (UniqueName: \"kubernetes.io/projected/658ce371-1c32-4cb6-ab5c-9f67ed85353b-kube-api-access-khvc5\") pod \"redhat-operators-jzvm9\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.145808 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fznvv"] Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.202932 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a959de4-373b-4ee5-a5ef-425d06ccea02-catalog-content\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.203019 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsvwb\" (UniqueName: \"kubernetes.io/projected/0a959de4-373b-4ee5-a5ef-425d06ccea02-kube-api-access-gsvwb\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.203073 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a959de4-373b-4ee5-a5ef-425d06ccea02-utilities\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.238791 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8697fb7788-m7gkk"] Jan 28 12:52:57 crc kubenswrapper[4848]: W0128 12:52:57.263458 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea482a02_f2ba_4bfd_bb1b_963082545a69.slice/crio-780a1bfcf350216a2702095e8b7459a68d8bf0b219cc3d23b3f667f524f5759c WatchSource:0}: Error finding container 780a1bfcf350216a2702095e8b7459a68d8bf0b219cc3d23b3f667f524f5759c: Status 404 returned error can't find the container with id 780a1bfcf350216a2702095e8b7459a68d8bf0b219cc3d23b3f667f524f5759c Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.284216 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.304758 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a959de4-373b-4ee5-a5ef-425d06ccea02-utilities\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.304821 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a959de4-373b-4ee5-a5ef-425d06ccea02-catalog-content\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.304873 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsvwb\" (UniqueName: \"kubernetes.io/projected/0a959de4-373b-4ee5-a5ef-425d06ccea02-kube-api-access-gsvwb\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.305260 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a959de4-373b-4ee5-a5ef-425d06ccea02-utilities\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.305626 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a959de4-373b-4ee5-a5ef-425d06ccea02-catalog-content\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.325055 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" event={"ID":"ea482a02-f2ba-4bfd-bb1b-963082545a69","Type":"ContainerStarted","Data":"780a1bfcf350216a2702095e8b7459a68d8bf0b219cc3d23b3f667f524f5759c"} Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.327342 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" event={"ID":"c4f4be3c-5836-4567-b030-2720240933e8","Type":"ContainerStarted","Data":"f75e68a2e74fd985c40d39191ccfe5c0f2badd7720b14abdb9ee621078b8bde3"} Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.327385 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" event={"ID":"c4f4be3c-5836-4567-b030-2720240933e8","Type":"ContainerStarted","Data":"012579552b130f4c8205f658ff0d1339c6c59ce5ee56c70771ab0b6eb1cc2419"} Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.328525 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.331463 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsvwb\" (UniqueName: 
\"kubernetes.io/projected/0a959de4-373b-4ee5-a5ef-425d06ccea02-kube-api-access-gsvwb\") pod \"community-operators-fznvv\" (UID: \"0a959de4-373b-4ee5-a5ef-425d06ccea02\") " pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.347485 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" podStartSLOduration=3.347462175 podStartE2EDuration="3.347462175s" podCreationTimestamp="2026-01-28 12:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:57.344713347 +0000 UTC m=+404.256930395" watchObservedRunningTime="2026-01-28 12:52:57.347462175 +0000 UTC m=+404.259679213" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.442730 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.556120 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-74d95f9b48-89ngl" Jan 28 12:52:57 crc kubenswrapper[4848]: I0128 12:52:57.825619 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jzvm9"] Jan 28 12:52:57 crc kubenswrapper[4848]: W0128 12:52:57.839488 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod658ce371_1c32_4cb6_ab5c_9f67ed85353b.slice/crio-0cde0fb010ffd9c4c488dd23782e150f00251df889dff723ecf0e1fdb26b161a WatchSource:0}: Error finding container 0cde0fb010ffd9c4c488dd23782e150f00251df889dff723ecf0e1fdb26b161a: Status 404 returned error can't find the container with id 0cde0fb010ffd9c4c488dd23782e150f00251df889dff723ecf0e1fdb26b161a Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.020343 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fznvv"] Jan 28 12:52:58 crc kubenswrapper[4848]: W0128 12:52:58.036478 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a959de4_373b_4ee5_a5ef_425d06ccea02.slice/crio-4cb9b24032958bf572661ba1d5835af7faa09e19078063c8f68bfbec7a5f971d WatchSource:0}: Error finding container 4cb9b24032958bf572661ba1d5835af7faa09e19078063c8f68bfbec7a5f971d: Status 404 returned error can't find the container with id 4cb9b24032958bf572661ba1d5835af7faa09e19078063c8f68bfbec7a5f971d Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.341323 4848 generic.go:334] "Generic (PLEG): container finished" podID="0a959de4-373b-4ee5-a5ef-425d06ccea02" containerID="f400fb452d4889272263858231629ba67452d935192ffdf6e133dadb86ba2fb6" exitCode=0 Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.341397 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fznvv" event={"ID":"0a959de4-373b-4ee5-a5ef-425d06ccea02","Type":"ContainerDied","Data":"f400fb452d4889272263858231629ba67452d935192ffdf6e133dadb86ba2fb6"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.341459 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fznvv" 
event={"ID":"0a959de4-373b-4ee5-a5ef-425d06ccea02","Type":"ContainerStarted","Data":"4cb9b24032958bf572661ba1d5835af7faa09e19078063c8f68bfbec7a5f971d"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.343815 4848 generic.go:334] "Generic (PLEG): container finished" podID="1816581b-af94-4067-9cd0-23c9e204bd4c" containerID="1898e98c1265ec7a55749a22b9cd80f123946783128b6f74b415bf44f9acb37a" exitCode=0 Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.343897 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pjsb" event={"ID":"1816581b-af94-4067-9cd0-23c9e204bd4c","Type":"ContainerDied","Data":"1898e98c1265ec7a55749a22b9cd80f123946783128b6f74b415bf44f9acb37a"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.348412 4848 generic.go:334] "Generic (PLEG): container finished" podID="02b30305-56c4-45c3-aae4-de194e8caa56" containerID="1f16c72c559ceaf1cfa6a2c449a7e471c2d82f871612313f830a7a1a787becdf" exitCode=0 Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.348471 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf2v5" event={"ID":"02b30305-56c4-45c3-aae4-de194e8caa56","Type":"ContainerDied","Data":"1f16c72c559ceaf1cfa6a2c449a7e471c2d82f871612313f830a7a1a787becdf"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.349958 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" event={"ID":"ea482a02-f2ba-4bfd-bb1b-963082545a69","Type":"ContainerStarted","Data":"38b2676f432a983cad5b41cec6bee7cdda157a28a6f051a4a96ecaaca93385f6"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.350829 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.351931 4848 generic.go:334] "Generic (PLEG): container finished" podID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerID="eef580e18af88bc5a8372506a007a2226ea45e135e9c2c4d9785071664997a59" exitCode=0 Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.353224 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerDied","Data":"eef580e18af88bc5a8372506a007a2226ea45e135e9c2c4d9785071664997a59"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.353261 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerStarted","Data":"0cde0fb010ffd9c4c488dd23782e150f00251df889dff723ecf0e1fdb26b161a"} Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.360153 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" Jan 28 12:52:58 crc kubenswrapper[4848]: I0128 12:52:58.397021 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8697fb7788-m7gkk" podStartSLOduration=4.397000255 podStartE2EDuration="4.397000255s" podCreationTimestamp="2026-01-28 12:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:58.395107691 +0000 UTC m=+405.307324719" watchObservedRunningTime="2026-01-28 12:52:58.397000255 +0000 UTC m=+405.309217303" Jan 28 
12:52:59 crc kubenswrapper[4848]: I0128 12:52:59.367024 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fznvv" event={"ID":"0a959de4-373b-4ee5-a5ef-425d06ccea02","Type":"ContainerStarted","Data":"20fbcf062d382fba141cef5054f7a2b7fcd31e1cdf9b43aad91b5d375739fe7c"} Jan 28 12:52:59 crc kubenswrapper[4848]: I0128 12:52:59.373655 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pjsb" event={"ID":"1816581b-af94-4067-9cd0-23c9e204bd4c","Type":"ContainerStarted","Data":"3dd99f5e2cf30e727a53be3b9c062d832aeab280ad5e8c5de482aa9a92ebb1ca"} Jan 28 12:52:59 crc kubenswrapper[4848]: I0128 12:52:59.377966 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerStarted","Data":"596e37d805c65e880e443ca109bbaf1cb7bacd829a6f5d62ae970be0113a46e2"} Jan 28 12:52:59 crc kubenswrapper[4848]: I0128 12:52:59.430068 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9pjsb" podStartSLOduration=2.732383392 podStartE2EDuration="5.430041027s" podCreationTimestamp="2026-01-28 12:52:54 +0000 UTC" firstStartedPulling="2026-01-28 12:52:56.306339572 +0000 UTC m=+403.218556610" lastFinishedPulling="2026-01-28 12:52:59.003997207 +0000 UTC m=+405.916214245" observedRunningTime="2026-01-28 12:52:59.427397202 +0000 UTC m=+406.339614250" watchObservedRunningTime="2026-01-28 12:52:59.430041027 +0000 UTC m=+406.342258065" Jan 28 12:52:59 crc kubenswrapper[4848]: E0128 12:52:59.545631 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a959de4_373b_4ee5_a5ef_425d06ccea02.slice/crio-20fbcf062d382fba141cef5054f7a2b7fcd31e1cdf9b43aad91b5d375739fe7c.scope\": RecentStats: unable to find data in memory cache]" Jan 28 12:53:00 crc kubenswrapper[4848]: I0128 12:53:00.385284 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerDied","Data":"596e37d805c65e880e443ca109bbaf1cb7bacd829a6f5d62ae970be0113a46e2"} Jan 28 12:53:00 crc kubenswrapper[4848]: I0128 12:53:00.385227 4848 generic.go:334] "Generic (PLEG): container finished" podID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerID="596e37d805c65e880e443ca109bbaf1cb7bacd829a6f5d62ae970be0113a46e2" exitCode=0 Jan 28 12:53:00 crc kubenswrapper[4848]: I0128 12:53:00.390131 4848 generic.go:334] "Generic (PLEG): container finished" podID="0a959de4-373b-4ee5-a5ef-425d06ccea02" containerID="20fbcf062d382fba141cef5054f7a2b7fcd31e1cdf9b43aad91b5d375739fe7c" exitCode=0 Jan 28 12:53:00 crc kubenswrapper[4848]: I0128 12:53:00.390212 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fznvv" event={"ID":"0a959de4-373b-4ee5-a5ef-425d06ccea02","Type":"ContainerDied","Data":"20fbcf062d382fba141cef5054f7a2b7fcd31e1cdf9b43aad91b5d375739fe7c"} Jan 28 12:53:00 crc kubenswrapper[4848]: I0128 12:53:00.405729 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf2v5" event={"ID":"02b30305-56c4-45c3-aae4-de194e8caa56","Type":"ContainerStarted","Data":"2969ebabbe6a0eb6eea29fa7826924f19ab5dea4802785fa48a8022167f9ec0f"} Jan 28 12:53:00 crc kubenswrapper[4848]: I0128 12:53:00.439079 4848 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sf2v5" podStartSLOduration=3.537266906 podStartE2EDuration="6.43905523s" podCreationTimestamp="2026-01-28 12:52:54 +0000 UTC" firstStartedPulling="2026-01-28 12:52:56.299081447 +0000 UTC m=+403.211298495" lastFinishedPulling="2026-01-28 12:52:59.200869781 +0000 UTC m=+406.113086819" observedRunningTime="2026-01-28 12:53:00.437722053 +0000 UTC m=+407.349939101" watchObservedRunningTime="2026-01-28 12:53:00.43905523 +0000 UTC m=+407.351272268" Jan 28 12:53:01 crc kubenswrapper[4848]: I0128 12:53:01.412852 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fznvv" event={"ID":"0a959de4-373b-4ee5-a5ef-425d06ccea02","Type":"ContainerStarted","Data":"af713ef2bbaf19e245f81e664628a92e3f6871cd6adbf45954f2c7a3954b3f82"} Jan 28 12:53:01 crc kubenswrapper[4848]: I0128 12:53:01.415505 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerStarted","Data":"018e6be662a04bd10a921eea5d1b9fc75818a3fa03a6123212a91e4042a12d4f"} Jan 28 12:53:01 crc kubenswrapper[4848]: I0128 12:53:01.436350 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fznvv" podStartSLOduration=1.9154818919999999 podStartE2EDuration="4.436326301s" podCreationTimestamp="2026-01-28 12:52:57 +0000 UTC" firstStartedPulling="2026-01-28 12:52:58.344511029 +0000 UTC m=+405.256728067" lastFinishedPulling="2026-01-28 12:53:00.865355438 +0000 UTC m=+407.777572476" observedRunningTime="2026-01-28 12:53:01.431752492 +0000 UTC m=+408.343969530" watchObservedRunningTime="2026-01-28 12:53:01.436326301 +0000 UTC m=+408.348543339" Jan 28 12:53:01 crc kubenswrapper[4848]: I0128 12:53:01.470310 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jzvm9" podStartSLOduration=3.028195662 podStartE2EDuration="5.470286273s" podCreationTimestamp="2026-01-28 12:52:56 +0000 UTC" firstStartedPulling="2026-01-28 12:52:58.354362697 +0000 UTC m=+405.266579735" lastFinishedPulling="2026-01-28 12:53:00.796453308 +0000 UTC m=+407.708670346" observedRunningTime="2026-01-28 12:53:01.467903235 +0000 UTC m=+408.380120283" watchObservedRunningTime="2026-01-28 12:53:01.470286273 +0000 UTC m=+408.382503321" Jan 28 12:53:04 crc kubenswrapper[4848]: I0128 12:53:04.912410 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:53:04 crc kubenswrapper[4848]: I0128 12:53:04.912672 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:53:04 crc kubenswrapper[4848]: I0128 12:53:04.962674 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.031647 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.031693 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.069553 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.484964 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.487654 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9pjsb" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.693540 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-tqpqm" Jan 28 12:53:05 crc kubenswrapper[4848]: I0128 12:53:05.749220 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fsgh8"] Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.284489 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.284554 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.335290 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.444042 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.444096 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.491765 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.503667 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.534820 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fznvv" Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.924565 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:53:07 crc kubenswrapper[4848]: I0128 12:53:07.924619 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:53:30 crc kubenswrapper[4848]: I0128 12:53:30.794331 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" podUID="1929eb16-0432-46a9-871d-3a2d75f37d7a" containerName="registry" containerID="cri-o://33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e" gracePeriod=30 Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.227701 4848 util.go:48] "No ready sandbox for pod 
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.392936 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-trusted-ca\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393024 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbpqh\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-kube-api-access-pbpqh\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393062 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-bound-sa-token\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393298 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393332 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1929eb16-0432-46a9-871d-3a2d75f37d7a-ca-trust-extracted\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393387 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-tls\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393423 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1929eb16-0432-46a9-871d-3a2d75f37d7a-installation-pull-secrets\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.393440 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-certificates\") pod \"1929eb16-0432-46a9-871d-3a2d75f37d7a\" (UID: \"1929eb16-0432-46a9-871d-3a2d75f37d7a\") "
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.394511 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.394583 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.412330 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1929eb16-0432-46a9-871d-3a2d75f37d7a-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.437212 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1929eb16-0432-46a9-871d-3a2d75f37d7a-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.437212 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-kube-api-access-pbpqh" (OuterVolumeSpecName: "kube-api-access-pbpqh") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "kube-api-access-pbpqh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.437687 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.437880 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.438131 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "1929eb16-0432-46a9-871d-3a2d75f37d7a" (UID: "1929eb16-0432-46a9-871d-3a2d75f37d7a"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495009 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbpqh\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-kube-api-access-pbpqh\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495067 4848 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495079 4848 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1929eb16-0432-46a9-871d-3a2d75f37d7a-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495092 4848 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495106 4848 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1929eb16-0432-46a9-871d-3a2d75f37d7a-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495117 4848 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-registry-certificates\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.495126 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1929eb16-0432-46a9-871d-3a2d75f37d7a-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.591756 4848 generic.go:334] "Generic (PLEG): container finished" podID="1929eb16-0432-46a9-871d-3a2d75f37d7a" containerID="33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e" exitCode=0
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.591804 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" event={"ID":"1929eb16-0432-46a9-871d-3a2d75f37d7a","Type":"ContainerDied","Data":"33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e"}
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.591852 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8" event={"ID":"1929eb16-0432-46a9-871d-3a2d75f37d7a","Type":"ContainerDied","Data":"2fef46230d113794d7047a92200c68fff286525a489507c8bd79057d5a24bf69"}
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.591873 4848 scope.go:117] "RemoveContainer" containerID="33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e"
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.591887 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fsgh8"
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.613879 4848 scope.go:117] "RemoveContainer" containerID="33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e"
Jan 28 12:53:31 crc kubenswrapper[4848]: E0128 12:53:31.614462 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e\": container with ID starting with 33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e not found: ID does not exist" containerID="33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e"
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.614508 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e"} err="failed to get container status \"33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e\": rpc error: code = NotFound desc = could not find container \"33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e\": container with ID starting with 33fe1d44dcd72fb8dcf8be9ed099e62f657c3183f48c04f36333c2761f93180e not found: ID does not exist"
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.623133 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fsgh8"]
Jan 28 12:53:31 crc kubenswrapper[4848]: I0128 12:53:31.635188 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fsgh8"]
Jan 28 12:53:32 crc kubenswrapper[4848]: I0128 12:53:32.857917 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1929eb16-0432-46a9-871d-3a2d75f37d7a" path="/var/lib/kubelet/pods/1929eb16-0432-46a9-871d-3a2d75f37d7a/volumes"
Jan 28 12:53:37 crc kubenswrapper[4848]: I0128 12:53:37.925242 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:53:37 crc kubenswrapper[4848]: I0128 12:53:37.925667 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:53:37 crc kubenswrapper[4848]: I0128 12:53:37.925751 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz"
Jan 28 12:53:37 crc kubenswrapper[4848]: I0128 12:53:37.926679 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 12:53:37 crc kubenswrapper[4848]: I0128 12:53:37.926777 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f" gracePeriod=600
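The "ContainerStatus from runtime service failed" / "DeleteContainer returned error" pair above is a benign race, not a cleanup failure: the registry container was already gone from cri-o by the time the follow-up status lookup ran, so the runtime answers with gRPC NotFound and the kubelet logs it and moves on. A sketch of the usual way such a removal is made idempotent against a CRI-style runtime — the client wiring here is a stand-in for illustration, not the kubelet's actual code path:

```go
package crisketch

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// removeContainer deletes a container and treats "already gone" as success,
// mirroring the NotFound handling visible in the log above.
func removeContainer(ctx context.Context, rt runtimeapi.RuntimeServiceClient, id string) error {
	_, err := rt.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ContainerId: id})
	if status.Code(err) == codes.NotFound {
		// The container vanished between listing and removal; nothing left to do.
		return nil
	}
	return err
}
```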
podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f" gracePeriod=600 Jan 28 12:53:38 crc kubenswrapper[4848]: I0128 12:53:38.647202 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f" exitCode=0 Jan 28 12:53:38 crc kubenswrapper[4848]: I0128 12:53:38.647290 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f"} Jan 28 12:53:38 crc kubenswrapper[4848]: I0128 12:53:38.647785 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"6e933c1f02c6d3091e38630936db38332fbcb3f6fc3c2d979f20dc55ece773e3"} Jan 28 12:53:38 crc kubenswrapper[4848]: I0128 12:53:38.647818 4848 scope.go:117] "RemoveContainer" containerID="64887d0e29564c187d9d9245ed65bdbe1c47143084a5ed11be86cba46f633f1f" Jan 28 12:56:07 crc kubenswrapper[4848]: I0128 12:56:07.924989 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:56:07 crc kubenswrapper[4848]: I0128 12:56:07.926432 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:56:37 crc kubenswrapper[4848]: I0128 12:56:37.924414 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:56:37 crc kubenswrapper[4848]: I0128 12:56:37.925044 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:57:07 crc kubenswrapper[4848]: I0128 12:57:07.924995 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:57:07 crc kubenswrapper[4848]: I0128 12:57:07.925523 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:57:07 
crc kubenswrapper[4848]: I0128 12:57:07.925566 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 12:57:07 crc kubenswrapper[4848]: I0128 12:57:07.926088 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6e933c1f02c6d3091e38630936db38332fbcb3f6fc3c2d979f20dc55ece773e3"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:57:07 crc kubenswrapper[4848]: I0128 12:57:07.926158 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://6e933c1f02c6d3091e38630936db38332fbcb3f6fc3c2d979f20dc55ece773e3" gracePeriod=600 Jan 28 12:57:08 crc kubenswrapper[4848]: I0128 12:57:08.166223 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="6e933c1f02c6d3091e38630936db38332fbcb3f6fc3c2d979f20dc55ece773e3" exitCode=0 Jan 28 12:57:08 crc kubenswrapper[4848]: I0128 12:57:08.166302 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"6e933c1f02c6d3091e38630936db38332fbcb3f6fc3c2d979f20dc55ece773e3"} Jan 28 12:57:08 crc kubenswrapper[4848]: I0128 12:57:08.166366 4848 scope.go:117] "RemoveContainer" containerID="fb4546396a3b9dcca725d914f66533ca38a08639f07d6f3c7c70ed82ab8d257f" Jan 28 12:57:09 crc kubenswrapper[4848]: I0128 12:57:09.176438 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"9b3d7bb96bb73c79bf1b8f4103851f2633f3719121e6446c4bbfd8ad6b1a1178"} Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.674518 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"] Jan 28 12:59:05 crc kubenswrapper[4848]: E0128 12:59:05.675478 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1929eb16-0432-46a9-871d-3a2d75f37d7a" containerName="registry" Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.675496 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="1929eb16-0432-46a9-871d-3a2d75f37d7a" containerName="registry" Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.675632 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="1929eb16-0432-46a9-871d-3a2d75f37d7a" containerName="registry" Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.676137 4848 util.go:30] "No sandbox for pod can be found. 
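The cpu_manager/state_mem/memory_manager triplet fires on the first pod admission after a pod removal: admitting cert-manager-cainjector triggers RemoveStaleState, which sweeps resource-manager assignments still keyed by the deleted registry pod's UID. Schematically it is a cleanup of a map keyed by (podUID, containerName) against the set of pods that still exist — a toy model of the pattern the log describes, not the kubelet's actual state store:

```go
package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState drops assignments whose pod no longer exists, the way
// the cpu_manager/memory_manager RemoveStaleState lines above describe.
func removeStaleState(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments { // deleting while ranging is safe in Go
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q pod %q\n", k.container, k.podUID)
			delete(assignments, k)
		}
	}
}

func main() {
	state := map[key]string{
		// Stale: the image-registry pod above was deleted minutes ago.
		{"1929eb16-0432-46a9-871d-3a2d75f37d7a", "registry"}: "cpus 0-3",
	}
	removeStaleState(state, map[string]bool{ /* deleted pod's UID absent */ })
}
```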
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.679256 4848 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-56crq"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.679498 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.680828 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.697855 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-c7q52"]
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.699358 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-c7q52"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.704487 4848 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-lw9sp"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.708760 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"]
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.716229 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vdv4\" (UniqueName: \"kubernetes.io/projected/59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8-kube-api-access-4vdv4\") pod \"cert-manager-858654f9db-c7q52\" (UID: \"59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8\") " pod="cert-manager/cert-manager-858654f9db-c7q52"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.730371 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-c7q52"]
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.743312 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-dwrr2"]
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.744390 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.747849 4848 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-vfhkw"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.749980 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-dwrr2"]
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.818473 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vdv4\" (UniqueName: \"kubernetes.io/projected/59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8-kube-api-access-4vdv4\") pod \"cert-manager-858654f9db-c7q52\" (UID: \"59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8\") " pod="cert-manager/cert-manager-858654f9db-c7q52"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.818599 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6xld\" (UniqueName: \"kubernetes.io/projected/4661c13f-0355-4d7e-b7d9-5a3446bfcc17-kube-api-access-j6xld\") pod \"cert-manager-webhook-687f57d79b-dwrr2\" (UID: \"4661c13f-0355-4d7e-b7d9-5a3446bfcc17\") " pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.818634 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqhln\" (UniqueName: \"kubernetes.io/projected/09794657-9406-4696-9df8-0f0d782604de-kube-api-access-gqhln\") pod \"cert-manager-cainjector-cf98fcc89-cvf98\" (UID: \"09794657-9406-4696-9df8-0f0d782604de\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.841650 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vdv4\" (UniqueName: \"kubernetes.io/projected/59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8-kube-api-access-4vdv4\") pod \"cert-manager-858654f9db-c7q52\" (UID: \"59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8\") " pod="cert-manager/cert-manager-858654f9db-c7q52"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.920225 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6xld\" (UniqueName: \"kubernetes.io/projected/4661c13f-0355-4d7e-b7d9-5a3446bfcc17-kube-api-access-j6xld\") pod \"cert-manager-webhook-687f57d79b-dwrr2\" (UID: \"4661c13f-0355-4d7e-b7d9-5a3446bfcc17\") " pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.920300 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqhln\" (UniqueName: \"kubernetes.io/projected/09794657-9406-4696-9df8-0f0d782604de-kube-api-access-gqhln\") pod \"cert-manager-cainjector-cf98fcc89-cvf98\" (UID: \"09794657-9406-4696-9df8-0f0d782604de\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.939395 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqhln\" (UniqueName: \"kubernetes.io/projected/09794657-9406-4696-9df8-0f0d782604de-kube-api-access-gqhln\") pod \"cert-manager-cainjector-cf98fcc89-cvf98\" (UID: \"09794657-9406-4696-9df8-0f0d782604de\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"
Jan 28 12:59:05 crc kubenswrapper[4848]: I0128 12:59:05.939529 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6xld\" (UniqueName: \"kubernetes.io/projected/4661c13f-0355-4d7e-b7d9-5a3446bfcc17-kube-api-access-j6xld\") pod \"cert-manager-webhook-687f57d79b-dwrr2\" (UID: \"4661c13f-0355-4d7e-b7d9-5a3446bfcc17\") " pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2"
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.001066 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.020307 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-c7q52"
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.062638 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2"
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.311908 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-cvf98"]
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.324298 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.366973 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-dwrr2"]
Jan 28 12:59:06 crc kubenswrapper[4848]: W0128 12:59:06.370574 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4661c13f_0355_4d7e_b7d9_5a3446bfcc17.slice/crio-35e697d13afe06bac8ba80bb734ea7284b0dba0734c8c3d7d5c94cd96599a978 WatchSource:0}: Error finding container 35e697d13afe06bac8ba80bb734ea7284b0dba0734c8c3d7d5c94cd96599a978: Status 404 returned error can't find the container with id 35e697d13afe06bac8ba80bb734ea7284b0dba0734c8c3d7d5c94cd96599a978
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.507892 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-c7q52"]
Jan 28 12:59:06 crc kubenswrapper[4848]: W0128 12:59:06.513973 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59ff8003_99d4_4d16_bb2f_6b5ff9ae8ac8.slice/crio-bbce93c439beb4e65ae20ad6b3c6db88e8b092e0f03189b82551748efd8a1ecf WatchSource:0}: Error finding container bbce93c439beb4e65ae20ad6b3c6db88e8b092e0f03189b82551748efd8a1ecf: Status 404 returned error can't find the container with id bbce93c439beb4e65ae20ad6b3c6db88e8b092e0f03189b82551748efd8a1ecf
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.981330 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2" event={"ID":"4661c13f-0355-4d7e-b7d9-5a3446bfcc17","Type":"ContainerStarted","Data":"35e697d13afe06bac8ba80bb734ea7284b0dba0734c8c3d7d5c94cd96599a978"}
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.984049 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-c7q52" event={"ID":"59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8","Type":"ContainerStarted","Data":"bbce93c439beb4e65ae20ad6b3c6db88e8b092e0f03189b82551748efd8a1ecf"}
Jan 28 12:59:06 crc kubenswrapper[4848]: I0128 12:59:06.985609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98" event={"ID":"09794657-9406-4696-9df8-0f0d782604de","Type":"ContainerStarted","Data":"d8f83be0641fa6efaea235100e819e34aeb132796d91c1e181efeaa55d85804c"}
event={"ID":"09794657-9406-4696-9df8-0f0d782604de","Type":"ContainerStarted","Data":"d8f83be0641fa6efaea235100e819e34aeb132796d91c1e181efeaa55d85804c"} Jan 28 12:59:09 crc kubenswrapper[4848]: I0128 12:59:09.807729 4848 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 28 12:59:12 crc kubenswrapper[4848]: I0128 12:59:12.021811 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98" event={"ID":"09794657-9406-4696-9df8-0f0d782604de","Type":"ContainerStarted","Data":"6aa3c8efcc07c0d56e5b220aa67d391a87ef81d35cd79d95b5eda1b1e8288c52"} Jan 28 12:59:12 crc kubenswrapper[4848]: I0128 12:59:12.023387 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2" event={"ID":"4661c13f-0355-4d7e-b7d9-5a3446bfcc17","Type":"ContainerStarted","Data":"baae27d9a13b7fa8926f90bb59922ad62d01cd408829ba9ea96abf96e0a86312"} Jan 28 12:59:12 crc kubenswrapper[4848]: I0128 12:59:12.023467 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2" Jan 28 12:59:12 crc kubenswrapper[4848]: I0128 12:59:12.040503 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cvf98" podStartSLOduration=1.912750705 podStartE2EDuration="7.040475571s" podCreationTimestamp="2026-01-28 12:59:05 +0000 UTC" firstStartedPulling="2026-01-28 12:59:06.324065311 +0000 UTC m=+773.236282349" lastFinishedPulling="2026-01-28 12:59:11.451790177 +0000 UTC m=+778.364007215" observedRunningTime="2026-01-28 12:59:12.03533031 +0000 UTC m=+778.947547358" watchObservedRunningTime="2026-01-28 12:59:12.040475571 +0000 UTC m=+778.952692609" Jan 28 12:59:12 crc kubenswrapper[4848]: I0128 12:59:12.066131 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2" podStartSLOduration=1.9884426130000001 podStartE2EDuration="7.066114245s" podCreationTimestamp="2026-01-28 12:59:05 +0000 UTC" firstStartedPulling="2026-01-28 12:59:06.373341454 +0000 UTC m=+773.285558492" lastFinishedPulling="2026-01-28 12:59:11.451013086 +0000 UTC m=+778.363230124" observedRunningTime="2026-01-28 12:59:12.064386447 +0000 UTC m=+778.976603485" watchObservedRunningTime="2026-01-28 12:59:12.066114245 +0000 UTC m=+778.978331283" Jan 28 12:59:13 crc kubenswrapper[4848]: I0128 12:59:13.032096 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-c7q52" event={"ID":"59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8","Type":"ContainerStarted","Data":"7dbbdf63e0bcc7c585f675b7b79c020c9aa6cf059963d40cec70e4108a6d4ece"} Jan 28 12:59:13 crc kubenswrapper[4848]: I0128 12:59:13.051480 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-c7q52" podStartSLOduration=2.494853208 podStartE2EDuration="8.05145689s" podCreationTimestamp="2026-01-28 12:59:05 +0000 UTC" firstStartedPulling="2026-01-28 12:59:06.51668499 +0000 UTC m=+773.428902028" lastFinishedPulling="2026-01-28 12:59:12.073288672 +0000 UTC m=+778.985505710" observedRunningTime="2026-01-28 12:59:13.047945484 +0000 UTC m=+779.960162522" watchObservedRunningTime="2026-01-28 12:59:13.05145689 +0000 UTC m=+779.963673928" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.070772 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-ovn-kubernetes/ovnkube-node-g9vht"] Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075061 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="nbdb" containerID="cri-o://aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075291 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="sbdb" containerID="cri-o://0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075365 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075492 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-acl-logging" containerID="cri-o://2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075572 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="northd" containerID="cri-o://cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075437 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-node" containerID="cri-o://46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.075765 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-controller" containerID="cri-o://b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.128540 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" containerID="cri-o://a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" gracePeriod=30 Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.195184 4848 scope.go:117] "RemoveContainer" containerID="6c24fb132d653d2e0a0c0048d0f47fcee595c34f0eaef7810c448e21784e33d6" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.795018 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/3.log" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.798619 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovn-acl-logging/0.log" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.799469 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovn-controller/0.log" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.800394 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.870109 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-netd\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871126 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-env-overrides\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.870904 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871274 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871382 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871411 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-log-socket\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871453 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-kubelet\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871480 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-ovn\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871488 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-log-socket" (OuterVolumeSpecName: "log-socket") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871508 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-ovn-kubernetes\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871547 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871545 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-openvswitch\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871619 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-config\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871625 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871655 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-script-lib\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871661 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871691 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871707 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-node-log\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871758 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-systemd\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871776 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-netns\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871801 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-etc-openvswitch\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871837 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-slash\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871859 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-systemd-units\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871895 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-bin\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871912 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-var-lib-openvswitch\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871931 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rr5bz\" (UniqueName: \"kubernetes.io/projected/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-kube-api-access-rr5bz\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.871949 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovn-node-metrics-cert\") pod \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\" (UID: \"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d\") " Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872547 4848 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872561 4848 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872574 4848 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872587 4848 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-log-socket\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872597 4848 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872610 4848 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.872619 4848 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.873057 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: 
"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.873472 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.873509 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.873949 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.873990 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874012 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874032 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-slash" (OuterVolumeSpecName: "host-slash") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874052 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jmdb5"] Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874405 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-acl-logging" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874424 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-acl-logging" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874440 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="sbdb" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874448 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="sbdb" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874458 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874468 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874480 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874473 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874491 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874540 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874551 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874564 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kubecfg-setup" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874573 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kubecfg-setup" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874585 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874595 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874610 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="northd" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874639 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="northd" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874657 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="nbdb" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874665 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="nbdb" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874676 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874696 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874705 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874715 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874725 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874733 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 12:59:15 crc kubenswrapper[4848]: E0128 12:59:15.874743 4848 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-node" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874752 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-node" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874908 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-node" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874931 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="nbdb" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874943 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="sbdb" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874953 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874962 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874972 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874985 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.874995 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovn-acl-logging" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.875007 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="northd" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.875017 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.875026 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.875324 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerName="ovnkube-controller" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.876048 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-node-log" (OuterVolumeSpecName: "node-log") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.876741 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.878380 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.881838 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.882471 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-kube-api-access-rr5bz" (OuterVolumeSpecName: "kube-api-access-rr5bz") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "kube-api-access-rr5bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.899932 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" (UID: "a67a8b01-b8a6-4ca0-96fb-d5af26125a8d"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973465 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-env-overrides\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973518 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-slash\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973541 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-run-netns\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973566 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-systemd\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973585 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovn-node-metrics-cert\") pod \"ovnkube-node-jmdb5\" (UID: 
\"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973613 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovnkube-config\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973633 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-var-lib-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973654 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-cni-netd\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973675 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-ovn\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973694 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973713 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-cni-bin\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973741 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5ssz\" (UniqueName: \"kubernetes.io/projected/c16a6587-7d6a-46b4-93bc-0aca699581c5-kube-api-access-b5ssz\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973762 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-node-log\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973778 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" 
(UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-systemd-units\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973796 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-log-socket\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973813 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973837 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-run-ovn-kubernetes\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973855 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-etc-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973874 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-kubelet\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973890 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovnkube-script-lib\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973944 4848 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973961 4848 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973977 4848 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-node-log\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.973988 4848 reconciler_common.go:293] 
"Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974000 4848 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974011 4848 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974021 4848 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-slash\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974034 4848 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974047 4848 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974057 4848 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974068 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rr5bz\" (UniqueName: \"kubernetes.io/projected/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-kube-api-access-rr5bz\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974079 4848 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4848]: I0128 12:59:15.974327 4848 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.055285 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovnkube-controller/3.log" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.058927 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovn-acl-logging/0.log" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.059556 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g9vht_a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/ovn-controller/0.log" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.059989 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" exitCode=0 
Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060048 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" exitCode=0 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060069 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" exitCode=0 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060061 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060126 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060139 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060165 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060085 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" exitCode=0 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060188 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060211 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060209 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" exitCode=0 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060235 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" exitCode=0 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060239 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060283 4848 scope.go:117] "RemoveContainer" 
containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060303 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060326 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060341 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060354 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060366 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060258 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" exitCode=143 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060399 4848 generic.go:334] "Generic (PLEG): container finished" podID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" exitCode=143 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060378 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060513 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060549 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060559 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060590 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060628 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060641 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060651 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060663 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060672 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060682 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060691 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060700 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060710 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060719 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060732 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060746 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060756 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060767 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060775 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060784 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060792 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060800 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060810 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060820 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060833 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060851 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g9vht" event={"ID":"a67a8b01-b8a6-4ca0-96fb-d5af26125a8d","Type":"ContainerDied","Data":"d720d7463c7132c730a439c10bf953881337e050739bb4d9e5f90afc1f6ed34c"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060869 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060882 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060892 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060903 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060914 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060926 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060936 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060946 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060956 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.060967 4848 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.062454 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/2.log" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.062507 4848 generic.go:334] "Generic (PLEG): container finished" podID="52f51c55-df27-4e41-b7c5-e3d714909803" containerID="48213125717d28145348d56b365f5cf3ae7ce7690b5dc23aec948d05ef4b7fea" exitCode=2 Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.062539 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerDied","Data":"48213125717d28145348d56b365f5cf3ae7ce7690b5dc23aec948d05ef4b7fea"} Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.064468 4848 scope.go:117] "RemoveContainer" containerID="48213125717d28145348d56b365f5cf3ae7ce7690b5dc23aec948d05ef4b7fea" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.066125 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-dwrr2" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.074744 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-systemd-units\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.074809 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-log-socket\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.074956 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-systemd-units\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.075027 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-log-socket\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.075157 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: 
\"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.075982 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076071 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-run-ovn-kubernetes\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076121 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-etc-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076159 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-kubelet\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076194 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovnkube-script-lib\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076305 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-env-overrides\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076334 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-run-ovn-kubernetes\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076353 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-slash\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076420 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-slash\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076460 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-run-netns\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076518 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-systemd\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076547 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovn-node-metrics-cert\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076609 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovnkube-config\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076643 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-var-lib-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076687 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-cni-netd\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076827 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-ovn\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076868 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077089 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovnkube-script-lib\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077160 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-kubelet\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077231 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-cni-bin\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077416 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5ssz\" (UniqueName: \"kubernetes.io/projected/c16a6587-7d6a-46b4-93bc-0aca699581c5-kube-api-access-b5ssz\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077561 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-node-log\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077713 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-node-log\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077823 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-env-overrides\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.077981 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.078071 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-var-lib-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.078113 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-cni-netd\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 
12:59:16.078142 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-ovn\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.078173 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-run-netns\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.076334 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-etc-openvswitch\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.078223 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-run-systemd\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.078320 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c16a6587-7d6a-46b4-93bc-0aca699581c5-host-cni-bin\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.078714 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovnkube-config\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.080678 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c16a6587-7d6a-46b4-93bc-0aca699581c5-ovn-node-metrics-cert\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.100177 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5ssz\" (UniqueName: \"kubernetes.io/projected/c16a6587-7d6a-46b4-93bc-0aca699581c5-kube-api-access-b5ssz\") pod \"ovnkube-node-jmdb5\" (UID: \"c16a6587-7d6a-46b4-93bc-0aca699581c5\") " pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.104617 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.134563 4848 scope.go:117] "RemoveContainer" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.141812 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g9vht"] Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.145293 
4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g9vht"] Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.158047 4848 scope.go:117] "RemoveContainer" containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.174282 4848 scope.go:117] "RemoveContainer" containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.191002 4848 scope.go:117] "RemoveContainer" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.215529 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.231221 4848 scope.go:117] "RemoveContainer" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.314179 4848 scope.go:117] "RemoveContainer" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.332237 4848 scope.go:117] "RemoveContainer" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.353982 4848 scope.go:117] "RemoveContainer" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.372936 4848 scope.go:117] "RemoveContainer" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.375381 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": container with ID starting with a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a not found: ID does not exist" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.375426 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} err="failed to get container status \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": rpc error: code = NotFound desc = could not find container \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": container with ID starting with a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.375461 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.376025 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": container with ID starting with d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168 not found: ID does not exist" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.376057 4848 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} err="failed to get container status \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": rpc error: code = NotFound desc = could not find container \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": container with ID starting with d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.376079 4848 scope.go:117] "RemoveContainer" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.376599 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": container with ID starting with 0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613 not found: ID does not exist" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.376665 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} err="failed to get container status \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": rpc error: code = NotFound desc = could not find container \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": container with ID starting with 0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.376708 4848 scope.go:117] "RemoveContainer" containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.377174 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": container with ID starting with aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb not found: ID does not exist" containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.377212 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} err="failed to get container status \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": rpc error: code = NotFound desc = could not find container \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": container with ID starting with aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.377227 4848 scope.go:117] "RemoveContainer" containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.377678 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": container with ID starting with cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d not found: ID does not exist" 
containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.377710 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} err="failed to get container status \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": rpc error: code = NotFound desc = could not find container \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": container with ID starting with cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.377728 4848 scope.go:117] "RemoveContainer" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.378295 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": container with ID starting with 51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d not found: ID does not exist" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.378348 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} err="failed to get container status \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": rpc error: code = NotFound desc = could not find container \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": container with ID starting with 51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.378372 4848 scope.go:117] "RemoveContainer" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.379054 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": container with ID starting with 46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a not found: ID does not exist" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.379092 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} err="failed to get container status \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": rpc error: code = NotFound desc = could not find container \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": container with ID starting with 46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.379111 4848 scope.go:117] "RemoveContainer" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.379532 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": container with ID starting with 2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb not found: ID does not exist" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.379572 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} err="failed to get container status \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": rpc error: code = NotFound desc = could not find container \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": container with ID starting with 2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.379596 4848 scope.go:117] "RemoveContainer" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.379927 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": container with ID starting with b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f not found: ID does not exist" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.379961 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} err="failed to get container status \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": rpc error: code = NotFound desc = could not find container \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": container with ID starting with b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.379979 4848 scope.go:117] "RemoveContainer" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" Jan 28 12:59:16 crc kubenswrapper[4848]: E0128 12:59:16.380257 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": container with ID starting with 301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3 not found: ID does not exist" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.380278 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} err="failed to get container status \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": rpc error: code = NotFound desc = could not find container \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": container with ID starting with 301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.380291 4848 scope.go:117] "RemoveContainer" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc 
kubenswrapper[4848]: I0128 12:59:16.381535 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} err="failed to get container status \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": rpc error: code = NotFound desc = could not find container \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": container with ID starting with a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.381560 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.382309 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} err="failed to get container status \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": rpc error: code = NotFound desc = could not find container \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": container with ID starting with d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.382337 4848 scope.go:117] "RemoveContainer" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.382770 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} err="failed to get container status \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": rpc error: code = NotFound desc = could not find container \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": container with ID starting with 0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.382808 4848 scope.go:117] "RemoveContainer" containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.383486 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} err="failed to get container status \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": rpc error: code = NotFound desc = could not find container \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": container with ID starting with aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.383523 4848 scope.go:117] "RemoveContainer" containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.383959 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} err="failed to get container status \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": rpc error: code = NotFound desc = could not find container \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": container with ID 
starting with cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.383984 4848 scope.go:117] "RemoveContainer" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.384378 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} err="failed to get container status \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": rpc error: code = NotFound desc = could not find container \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": container with ID starting with 51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.384428 4848 scope.go:117] "RemoveContainer" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.385055 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} err="failed to get container status \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": rpc error: code = NotFound desc = could not find container \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": container with ID starting with 46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.385078 4848 scope.go:117] "RemoveContainer" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.385408 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} err="failed to get container status \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": rpc error: code = NotFound desc = could not find container \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": container with ID starting with 2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.385441 4848 scope.go:117] "RemoveContainer" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.385894 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} err="failed to get container status \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": rpc error: code = NotFound desc = could not find container \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": container with ID starting with b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.385922 4848 scope.go:117] "RemoveContainer" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.386299 4848 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} err="failed to get container status \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": rpc error: code = NotFound desc = could not find container \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": container with ID starting with 301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.386326 4848 scope.go:117] "RemoveContainer" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.386729 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} err="failed to get container status \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": rpc error: code = NotFound desc = could not find container \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": container with ID starting with a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.386784 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.387117 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} err="failed to get container status \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": rpc error: code = NotFound desc = could not find container \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": container with ID starting with d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.387144 4848 scope.go:117] "RemoveContainer" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.387578 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} err="failed to get container status \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": rpc error: code = NotFound desc = could not find container \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": container with ID starting with 0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.387624 4848 scope.go:117] "RemoveContainer" containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.388048 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} err="failed to get container status \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": rpc error: code = NotFound desc = could not find container \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": container with ID starting with aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb not found: ID does not exist" Jan 
28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.388102 4848 scope.go:117] "RemoveContainer" containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.388538 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} err="failed to get container status \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": rpc error: code = NotFound desc = could not find container \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": container with ID starting with cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.388573 4848 scope.go:117] "RemoveContainer" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.388913 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} err="failed to get container status \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": rpc error: code = NotFound desc = could not find container \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": container with ID starting with 51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.388939 4848 scope.go:117] "RemoveContainer" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.389240 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} err="failed to get container status \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": rpc error: code = NotFound desc = could not find container \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": container with ID starting with 46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.389294 4848 scope.go:117] "RemoveContainer" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.390457 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} err="failed to get container status \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": rpc error: code = NotFound desc = could not find container \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": container with ID starting with 2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.390512 4848 scope.go:117] "RemoveContainer" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.390863 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} err="failed to get container status 
\"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": rpc error: code = NotFound desc = could not find container \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": container with ID starting with b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.390899 4848 scope.go:117] "RemoveContainer" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.391234 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} err="failed to get container status \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": rpc error: code = NotFound desc = could not find container \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": container with ID starting with 301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.391323 4848 scope.go:117] "RemoveContainer" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.391614 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} err="failed to get container status \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": rpc error: code = NotFound desc = could not find container \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": container with ID starting with a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.391655 4848 scope.go:117] "RemoveContainer" containerID="d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.391939 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168"} err="failed to get container status \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": rpc error: code = NotFound desc = could not find container \"d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168\": container with ID starting with d8930c9600e180384256f2133f1a2b949993457896e4412f5b06dd9e4ea69168 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.391985 4848 scope.go:117] "RemoveContainer" containerID="0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.392284 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613"} err="failed to get container status \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": rpc error: code = NotFound desc = could not find container \"0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613\": container with ID starting with 0404ea151d3a6f139821dfb2a4c1642c20a1b7846b090f6d0f11c73068113613 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.392317 4848 scope.go:117] "RemoveContainer" 
containerID="aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.392568 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb"} err="failed to get container status \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": rpc error: code = NotFound desc = could not find container \"aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb\": container with ID starting with aa608366c09b4a7c4c4b20bd0c98d8c92f661d318bd31b69bb6f8dec60b1bafb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.392589 4848 scope.go:117] "RemoveContainer" containerID="cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.392999 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d"} err="failed to get container status \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": rpc error: code = NotFound desc = could not find container \"cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d\": container with ID starting with cd7c17a54de60a351f5af56acf8bf411fd2743a04ad877626e0d15fcb20a1f7d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.393050 4848 scope.go:117] "RemoveContainer" containerID="51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.393427 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d"} err="failed to get container status \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": rpc error: code = NotFound desc = could not find container \"51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d\": container with ID starting with 51196aaf507052a99310094c49c272a19d9149fb768d7a6c32121ea8cc391d2d not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.393458 4848 scope.go:117] "RemoveContainer" containerID="46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.393758 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a"} err="failed to get container status \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": rpc error: code = NotFound desc = could not find container \"46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a\": container with ID starting with 46c9aa083e21f6658dd64d6b9c10bb4c0c2168e5e715b1f551ede07177d9032a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.393790 4848 scope.go:117] "RemoveContainer" containerID="2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.394074 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb"} err="failed to get container status \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": rpc error: code = NotFound desc = could not find 
container \"2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb\": container with ID starting with 2ae10a4c0ba99f5d37e48a50118861e4198d6166fa9d0377b1fcfec3f1b339eb not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.394110 4848 scope.go:117] "RemoveContainer" containerID="b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.394442 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f"} err="failed to get container status \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": rpc error: code = NotFound desc = could not find container \"b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f\": container with ID starting with b7327c34b69a2e3737b6506b11211edebae14b147ff45af988bcb3612fad8e6f not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.394475 4848 scope.go:117] "RemoveContainer" containerID="301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.394724 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3"} err="failed to get container status \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": rpc error: code = NotFound desc = could not find container \"301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3\": container with ID starting with 301ba558042ce69bf031065656bef4cca397fd2ce5eceb73ebbee02362e0a5b3 not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.394754 4848 scope.go:117] "RemoveContainer" containerID="a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.395136 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a"} err="failed to get container status \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": rpc error: code = NotFound desc = could not find container \"a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a\": container with ID starting with a212ab15133aae2ad5ea39f830064f71e528e33495fe0853be91ab85c38cec3a not found: ID does not exist" Jan 28 12:59:16 crc kubenswrapper[4848]: I0128 12:59:16.859660 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a67a8b01-b8a6-4ca0-96fb-d5af26125a8d" path="/var/lib/kubelet/pods/a67a8b01-b8a6-4ca0-96fb-d5af26125a8d/volumes" Jan 28 12:59:17 crc kubenswrapper[4848]: I0128 12:59:17.073236 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bmnpt_52f51c55-df27-4e41-b7c5-e3d714909803/kube-multus/2.log" Jan 28 12:59:17 crc kubenswrapper[4848]: I0128 12:59:17.073387 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bmnpt" event={"ID":"52f51c55-df27-4e41-b7c5-e3d714909803","Type":"ContainerStarted","Data":"a4dd68b368c67224d3af717b1907fdf9b288c8a9a223237bbc2936fb10eda916"} Jan 28 12:59:17 crc kubenswrapper[4848]: I0128 12:59:17.079150 4848 generic.go:334] "Generic (PLEG): container finished" podID="c16a6587-7d6a-46b4-93bc-0aca699581c5" containerID="72e1fc31bacb65a7526f30be123eb59df59207eb21661da884eb6b287b0e3ac3" exitCode=0 Jan 28 
12:59:17 crc kubenswrapper[4848]: I0128 12:59:17.079211 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerDied","Data":"72e1fc31bacb65a7526f30be123eb59df59207eb21661da884eb6b287b0e3ac3"} Jan 28 12:59:17 crc kubenswrapper[4848]: I0128 12:59:17.079299 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"59f3d41c1781a910f8c77a272177bc12398092ee7c0e5e99b33881c111f3f527"} Jan 28 12:59:18 crc kubenswrapper[4848]: I0128 12:59:18.103434 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"5aa09701df6a0b474cca449e5c13bcfd6bf7ca015e8a3197f56a72d4f6340d9c"} Jan 28 12:59:18 crc kubenswrapper[4848]: I0128 12:59:18.103856 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"5a04571b53b144623f7cd5d494e0de0d7cf673e66a5f26c53af8dbce59bb6570"} Jan 28 12:59:18 crc kubenswrapper[4848]: I0128 12:59:18.103869 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"4029dc856ce9e9c6d6982d2804486afa733febdc25ed1062b83ab0efebd420b8"} Jan 28 12:59:18 crc kubenswrapper[4848]: I0128 12:59:18.103878 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"dc25a83af69874679379ce5944124d10c3c994398917a5ca603c32b91370ae4b"} Jan 28 12:59:18 crc kubenswrapper[4848]: I0128 12:59:18.103889 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"2baee4ec95705a7c6512c1108b246367870fde8c18f7e35c1c92ef0d099ac7c6"} Jan 28 12:59:18 crc kubenswrapper[4848]: I0128 12:59:18.103900 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"2de06675c0e4de442e922cf3ba54ff74b4c886924e58b0717b5c3b3da4a17fe1"} Jan 28 12:59:20 crc kubenswrapper[4848]: I0128 12:59:20.120031 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"047ece16882efa2989119909431b72bc080c378ee2493c04c4851b98f1a9a266"} Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.145954 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" event={"ID":"c16a6587-7d6a-46b4-93bc-0aca699581c5","Type":"ContainerStarted","Data":"741d56631ecbf80463fd7119c822dc154b7a69c7a5f22b9d830e2b0caf0e2e98"} Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.146738 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.146777 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" 
Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.146986 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5"
Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.179511 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5"
Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.192427 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5"
Jan 28 12:59:23 crc kubenswrapper[4848]: I0128 12:59:23.232315 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5" podStartSLOduration=8.23229093 podStartE2EDuration="8.23229093s" podCreationTimestamp="2026-01-28 12:59:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:59:23.201516095 +0000 UTC m=+790.113733153" watchObservedRunningTime="2026-01-28 12:59:23.23229093 +0000 UTC m=+790.144508008"
Jan 28 12:59:37 crc kubenswrapper[4848]: I0128 12:59:37.924932 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:59:37 crc kubenswrapper[4848]: I0128 12:59:37.925695 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:59:46 crc kubenswrapper[4848]: I0128 12:59:46.242538 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jmdb5"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.333607 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"]
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.335348 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.339156 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.351910 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"]
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.435025 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.435120 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.435220 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kwrp\" (UniqueName: \"kubernetes.io/projected/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-kube-api-access-7kwrp\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.537480 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.537571 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.537692 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kwrp\" (UniqueName: \"kubernetes.io/projected/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-kube-api-access-7kwrp\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.538514 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.538515 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.563910 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kwrp\" (UniqueName: \"kubernetes.io/projected/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-kube-api-access-7kwrp\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.660496 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:47 crc kubenswrapper[4848]: I0128 12:59:47.931156 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"]
Jan 28 12:59:48 crc kubenswrapper[4848]: I0128 12:59:48.317489 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj" event={"ID":"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7","Type":"ContainerStarted","Data":"8ccfa85b4e245cd2f08ad658174d83180f8738fbd192169410b97826d05ff8e8"}
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.333810 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj" event={"ID":"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7","Type":"ContainerStarted","Data":"585ba3e8a1a50ac83d2dd6a641bdfc2634680b90b77d7e470626823501cba86f"}
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.650299 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q5wgd"]
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.654187 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.662633 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q5wgd"]
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.771646 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-catalog-content\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.771734 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-utilities\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.771811 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pljzl\" (UniqueName: \"kubernetes.io/projected/0b15a664-642b-46b8-90c1-0d17a9cacd87-kube-api-access-pljzl\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.874237 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-catalog-content\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.874344 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-utilities\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.874437 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pljzl\" (UniqueName: \"kubernetes.io/projected/0b15a664-642b-46b8-90c1-0d17a9cacd87-kube-api-access-pljzl\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.875012 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-utilities\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.875355 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-catalog-content\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.902201 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pljzl\" (UniqueName: \"kubernetes.io/projected/0b15a664-642b-46b8-90c1-0d17a9cacd87-kube-api-access-pljzl\") pod \"redhat-operators-q5wgd\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:49 crc kubenswrapper[4848]: I0128 12:59:49.975875 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:50 crc kubenswrapper[4848]: I0128 12:59:50.210886 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q5wgd"]
Jan 28 12:59:50 crc kubenswrapper[4848]: I0128 12:59:50.341605 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerStarted","Data":"334205b021ebf63f8cef16df5e84ad5ee4ad186159ecb37935cce8f6e8eba0c8"}
Jan 28 12:59:50 crc kubenswrapper[4848]: I0128 12:59:50.343313 4848 generic.go:334] "Generic (PLEG): container finished" podID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerID="585ba3e8a1a50ac83d2dd6a641bdfc2634680b90b77d7e470626823501cba86f" exitCode=0
Jan 28 12:59:50 crc kubenswrapper[4848]: I0128 12:59:50.343344 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj" event={"ID":"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7","Type":"ContainerDied","Data":"585ba3e8a1a50ac83d2dd6a641bdfc2634680b90b77d7e470626823501cba86f"}
Jan 28 12:59:51 crc kubenswrapper[4848]: I0128 12:59:51.352150 4848 generic.go:334] "Generic (PLEG): container finished" podID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerID="2a44714c4f1980475ba9eeb010c7c87b87a3bf15d47d456be6124b4153e7dc7f" exitCode=0
Jan 28 12:59:51 crc kubenswrapper[4848]: I0128 12:59:51.352276 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerDied","Data":"2a44714c4f1980475ba9eeb010c7c87b87a3bf15d47d456be6124b4153e7dc7f"}
Jan 28 12:59:53 crc kubenswrapper[4848]: I0128 12:59:53.368970 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerStarted","Data":"75a5f1e41c1d19526fb65d4c9bfb280ba5c78852092bbe1ef8e864366098e20b"}
Jan 28 12:59:53 crc kubenswrapper[4848]: I0128 12:59:53.372066 4848 generic.go:334] "Generic (PLEG): container finished" podID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerID="93d886f444e7613c84dafdaf8f2abc088f8ff5b33396a39b6f8776768bd5bfb7" exitCode=0
Jan 28 12:59:53 crc kubenswrapper[4848]: I0128 12:59:53.372116 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj" event={"ID":"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7","Type":"ContainerDied","Data":"93d886f444e7613c84dafdaf8f2abc088f8ff5b33396a39b6f8776768bd5bfb7"}
Jan 28 12:59:54 crc kubenswrapper[4848]: I0128 12:59:54.381152 4848 generic.go:334] "Generic (PLEG): container finished" podID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerID="75a5f1e41c1d19526fb65d4c9bfb280ba5c78852092bbe1ef8e864366098e20b" exitCode=0
Jan 28 12:59:54 crc kubenswrapper[4848]: I0128 12:59:54.381299 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerDied","Data":"75a5f1e41c1d19526fb65d4c9bfb280ba5c78852092bbe1ef8e864366098e20b"}
Jan 28 12:59:54 crc kubenswrapper[4848]: I0128 12:59:54.385184 4848 generic.go:334] "Generic (PLEG): container finished" podID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerID="6863d5facc3884c9ddfbdcd4c8b7ae362ebd9151d3eff9bd8cd27be70280934d" exitCode=0
Jan 28 12:59:54 crc kubenswrapper[4848]: I0128 12:59:54.385269 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj" event={"ID":"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7","Type":"ContainerDied","Data":"6863d5facc3884c9ddfbdcd4c8b7ae362ebd9151d3eff9bd8cd27be70280934d"}
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.393096 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerStarted","Data":"6f035c48e57c68c79be3ad61442cd08d5adca50597ea16644f6f416f25068230"}
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.431221 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q5wgd" podStartSLOduration=2.814669935 podStartE2EDuration="6.431189958s" podCreationTimestamp="2026-01-28 12:59:49 +0000 UTC" firstStartedPulling="2026-01-28 12:59:51.354592901 +0000 UTC m=+818.266809939" lastFinishedPulling="2026-01-28 12:59:54.971112934 +0000 UTC m=+821.883329962" observedRunningTime="2026-01-28 12:59:55.426370845 +0000 UTC m=+822.338587913" watchObservedRunningTime="2026-01-28 12:59:55.431189958 +0000 UTC m=+822.343407006"
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.662689 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.764275 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-util\") pod \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") "
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.764397 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kwrp\" (UniqueName: \"kubernetes.io/projected/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-kube-api-access-7kwrp\") pod \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") "
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.764419 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-bundle\") pod \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\" (UID: \"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7\") "
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.767347 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-bundle" (OuterVolumeSpecName: "bundle") pod "d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" (UID: "d21dbbe3-7e51-4175-8602-91d4f3d3d8b7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.775048 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-kube-api-access-7kwrp" (OuterVolumeSpecName: "kube-api-access-7kwrp") pod "d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" (UID: "d21dbbe3-7e51-4175-8602-91d4f3d3d8b7"). InnerVolumeSpecName "kube-api-access-7kwrp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.775184 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-util" (OuterVolumeSpecName: "util") pod "d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" (UID: "d21dbbe3-7e51-4175-8602-91d4f3d3d8b7"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.866303 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kwrp\" (UniqueName: \"kubernetes.io/projected/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-kube-api-access-7kwrp\") on node \"crc\" DevicePath \"\""
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.866344 4848 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 12:59:55 crc kubenswrapper[4848]: I0128 12:59:55.866353 4848 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d21dbbe3-7e51-4175-8602-91d4f3d3d8b7-util\") on node \"crc\" DevicePath \"\""
Jan 28 12:59:56 crc kubenswrapper[4848]: I0128 12:59:56.401862 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj" event={"ID":"d21dbbe3-7e51-4175-8602-91d4f3d3d8b7","Type":"ContainerDied","Data":"8ccfa85b4e245cd2f08ad658174d83180f8738fbd192169410b97826d05ff8e8"}
Jan 28 12:59:56 crc kubenswrapper[4848]: I0128 12:59:56.401926 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ccfa85b4e245cd2f08ad658174d83180f8738fbd192169410b97826d05ff8e8"
Jan 28 12:59:56 crc kubenswrapper[4848]: I0128 12:59:56.402216 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj"
Jan 28 12:59:59 crc kubenswrapper[4848]: I0128 12:59:59.977452 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 12:59:59 crc kubenswrapper[4848]: I0128 12:59:59.977817 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q5wgd"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.205586 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"]
Jan 28 13:00:00 crc kubenswrapper[4848]: E0128 13:00:00.205839 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="extract"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.205853 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="extract"
Jan 28 13:00:00 crc kubenswrapper[4848]: E0128 13:00:00.205876 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="pull"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.205884 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="pull"
Jan 28 13:00:00 crc kubenswrapper[4848]: E0128 13:00:00.205893 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="util"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.205902 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="util"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.206012 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="d21dbbe3-7e51-4175-8602-91d4f3d3d8b7" containerName="extract"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.206413 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.208918 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.209384 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.222006 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"]
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.329037 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1917adb5-e9d0-44e9-9176-afb51d9c0f30-secret-volume\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.329118 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1917adb5-e9d0-44e9-9176-afb51d9c0f30-config-volume\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.329261 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9svsw\" (UniqueName: \"kubernetes.io/projected/1917adb5-e9d0-44e9-9176-afb51d9c0f30-kube-api-access-9svsw\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.431297 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9svsw\" (UniqueName: \"kubernetes.io/projected/1917adb5-e9d0-44e9-9176-afb51d9c0f30-kube-api-access-9svsw\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.431403 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1917adb5-e9d0-44e9-9176-afb51d9c0f30-secret-volume\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.431435 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1917adb5-e9d0-44e9-9176-afb51d9c0f30-config-volume\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.432696 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1917adb5-e9d0-44e9-9176-afb51d9c0f30-config-volume\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.454215 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1917adb5-e9d0-44e9-9176-afb51d9c0f30-secret-volume\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.648737 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9svsw\" (UniqueName: \"kubernetes.io/projected/1917adb5-e9d0-44e9-9176-afb51d9c0f30-kube-api-access-9svsw\") pod \"collect-profiles-29493420-ss2wb\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:00 crc kubenswrapper[4848]: I0128 13:00:00.822910 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"
Jan 28 13:00:01 crc kubenswrapper[4848]: I0128 13:00:01.046954 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q5wgd" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="registry-server" probeResult="failure" output=<
Jan 28 13:00:01 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s
Jan 28 13:00:01 crc kubenswrapper[4848]: >
Jan 28 13:00:01 crc kubenswrapper[4848]: I0128 13:00:01.174069 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"]
Jan 28 13:00:01 crc kubenswrapper[4848]: I0128 13:00:01.434974 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" event={"ID":"1917adb5-e9d0-44e9-9176-afb51d9c0f30","Type":"ContainerStarted","Data":"356c70a4ab35258d1178b8d931164158b06a5e98e3b42af562391b6824c61d1e"}
Jan 28 13:00:04 crc kubenswrapper[4848]: I0128 13:00:04.459860 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" event={"ID":"1917adb5-e9d0-44e9-9176-afb51d9c0f30","Type":"ContainerStarted","Data":"3b0362aaee98900bfca0f46b8768113073100c8e5f1880eb1e4029b3aeb38104"}
Jan 28 13:00:04 crc kubenswrapper[4848]: I0128 13:00:04.483600 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" podStartSLOduration=4.483578797 podStartE2EDuration="4.483578797s" podCreationTimestamp="2026-01-28 13:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:00:04.47861764 +0000 UTC m=+831.390834678" watchObservedRunningTime="2026-01-28 13:00:04.483578797 +0000 UTC m=+831.395795835"
Jan 28 13:00:04 crc kubenswrapper[4848]: E0128 13:00:04.774983 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1917adb5_e9d0_44e9_9176_afb51d9c0f30.slice/crio-conmon-3b0362aaee98900bfca0f46b8768113073100c8e5f1880eb1e4029b3aeb38104.scope\": RecentStats: unable to find data in memory cache]"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.314161 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh"]
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.315582 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.317766 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-dfvlm"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.318337 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.324546 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.329690 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh"]
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.399026 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8"]
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.399883 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.404983 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.405209 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-v7wvm"
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.409894 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn"]
Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.411082 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.429660 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn"] Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.434239 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8"] Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.447580 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/40955df6-8a58-487d-98fb-f8632536c72a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8\" (UID: \"40955df6-8a58-487d-98fb-f8632536c72a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.447702 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/40955df6-8a58-487d-98fb-f8632536c72a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8\" (UID: \"40955df6-8a58-487d-98fb-f8632536c72a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.447737 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/25424d22-6211-41f8-9482-de5ca224224c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn\" (UID: \"25424d22-6211-41f8-9482-de5ca224224c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.447760 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/25424d22-6211-41f8-9482-de5ca224224c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn\" (UID: \"25424d22-6211-41f8-9482-de5ca224224c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.447787 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcjjj\" (UniqueName: \"kubernetes.io/projected/021caff7-8415-451a-941e-20d025a0aa2b-kube-api-access-zcjjj\") pod \"obo-prometheus-operator-68bc856cb9-pwsdh\" (UID: \"021caff7-8415-451a-941e-20d025a0aa2b\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.480637 4848 generic.go:334] "Generic (PLEG): container finished" podID="1917adb5-e9d0-44e9-9176-afb51d9c0f30" containerID="3b0362aaee98900bfca0f46b8768113073100c8e5f1880eb1e4029b3aeb38104" exitCode=0 Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.480693 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" event={"ID":"1917adb5-e9d0-44e9-9176-afb51d9c0f30","Type":"ContainerDied","Data":"3b0362aaee98900bfca0f46b8768113073100c8e5f1880eb1e4029b3aeb38104"} Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.555014 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/40955df6-8a58-487d-98fb-f8632536c72a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8\" (UID: \"40955df6-8a58-487d-98fb-f8632536c72a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.555128 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/40955df6-8a58-487d-98fb-f8632536c72a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8\" (UID: \"40955df6-8a58-487d-98fb-f8632536c72a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.555161 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/25424d22-6211-41f8-9482-de5ca224224c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn\" (UID: \"25424d22-6211-41f8-9482-de5ca224224c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.555182 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/25424d22-6211-41f8-9482-de5ca224224c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn\" (UID: \"25424d22-6211-41f8-9482-de5ca224224c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.555213 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcjjj\" (UniqueName: \"kubernetes.io/projected/021caff7-8415-451a-941e-20d025a0aa2b-kube-api-access-zcjjj\") pod \"obo-prometheus-operator-68bc856cb9-pwsdh\" (UID: \"021caff7-8415-451a-941e-20d025a0aa2b\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.565316 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/25424d22-6211-41f8-9482-de5ca224224c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn\" (UID: \"25424d22-6211-41f8-9482-de5ca224224c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.565752 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/40955df6-8a58-487d-98fb-f8632536c72a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8\" (UID: \"40955df6-8a58-487d-98fb-f8632536c72a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.576923 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/40955df6-8a58-487d-98fb-f8632536c72a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8\" (UID: \"40955df6-8a58-487d-98fb-f8632536c72a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc 
kubenswrapper[4848]: I0128 13:00:05.579353 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/25424d22-6211-41f8-9482-de5ca224224c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn\" (UID: \"25424d22-6211-41f8-9482-de5ca224224c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.583704 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-hs6jb"] Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.584910 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.588817 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.589013 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-8k894" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.594805 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcjjj\" (UniqueName: \"kubernetes.io/projected/021caff7-8415-451a-941e-20d025a0aa2b-kube-api-access-zcjjj\") pod \"obo-prometheus-operator-68bc856cb9-pwsdh\" (UID: \"021caff7-8415-451a-941e-20d025a0aa2b\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.605181 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-hs6jb"] Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.648019 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.668154 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ec6c23a2-9920-4672-92c6-c44569e918d4-observability-operator-tls\") pod \"observability-operator-59bdc8b94-hs6jb\" (UID: \"ec6c23a2-9920-4672-92c6-c44569e918d4\") " pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.668213 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzpxd\" (UniqueName: \"kubernetes.io/projected/ec6c23a2-9920-4672-92c6-c44569e918d4-kube-api-access-rzpxd\") pod \"observability-operator-59bdc8b94-hs6jb\" (UID: \"ec6c23a2-9920-4672-92c6-c44569e918d4\") " pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.714433 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.733643 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.772576 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ec6c23a2-9920-4672-92c6-c44569e918d4-observability-operator-tls\") pod \"observability-operator-59bdc8b94-hs6jb\" (UID: \"ec6c23a2-9920-4672-92c6-c44569e918d4\") " pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.772634 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzpxd\" (UniqueName: \"kubernetes.io/projected/ec6c23a2-9920-4672-92c6-c44569e918d4-kube-api-access-rzpxd\") pod \"observability-operator-59bdc8b94-hs6jb\" (UID: \"ec6c23a2-9920-4672-92c6-c44569e918d4\") " pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.776326 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ec6c23a2-9920-4672-92c6-c44569e918d4-observability-operator-tls\") pod \"observability-operator-59bdc8b94-hs6jb\" (UID: \"ec6c23a2-9920-4672-92c6-c44569e918d4\") " pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.813746 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-lh2xv"] Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.814755 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-lh2xv"] Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.814877 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.821534 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-t87zc" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.832145 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzpxd\" (UniqueName: \"kubernetes.io/projected/ec6c23a2-9920-4672-92c6-c44569e918d4-kube-api-access-rzpxd\") pod \"observability-operator-59bdc8b94-hs6jb\" (UID: \"ec6c23a2-9920-4672-92c6-c44569e918d4\") " pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.977366 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5xkc\" (UniqueName: \"kubernetes.io/projected/ff57a0c9-f0c9-4ba1-9166-37cb03178711-kube-api-access-f5xkc\") pod \"perses-operator-5bf474d74f-lh2xv\" (UID: \"ff57a0c9-f0c9-4ba1-9166-37cb03178711\") " pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.977434 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/ff57a0c9-f0c9-4ba1-9166-37cb03178711-openshift-service-ca\") pod \"perses-operator-5bf474d74f-lh2xv\" (UID: \"ff57a0c9-f0c9-4ba1-9166-37cb03178711\") " pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:05 crc kubenswrapper[4848]: I0128 13:00:05.977997 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.038499 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh"] Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.079150 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5xkc\" (UniqueName: \"kubernetes.io/projected/ff57a0c9-f0c9-4ba1-9166-37cb03178711-kube-api-access-f5xkc\") pod \"perses-operator-5bf474d74f-lh2xv\" (UID: \"ff57a0c9-f0c9-4ba1-9166-37cb03178711\") " pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.079228 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/ff57a0c9-f0c9-4ba1-9166-37cb03178711-openshift-service-ca\") pod \"perses-operator-5bf474d74f-lh2xv\" (UID: \"ff57a0c9-f0c9-4ba1-9166-37cb03178711\") " pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.080709 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/ff57a0c9-f0c9-4ba1-9166-37cb03178711-openshift-service-ca\") pod \"perses-operator-5bf474d74f-lh2xv\" (UID: \"ff57a0c9-f0c9-4ba1-9166-37cb03178711\") " pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.103446 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5xkc\" (UniqueName: \"kubernetes.io/projected/ff57a0c9-f0c9-4ba1-9166-37cb03178711-kube-api-access-f5xkc\") pod \"perses-operator-5bf474d74f-lh2xv\" (UID: 
\"ff57a0c9-f0c9-4ba1-9166-37cb03178711\") " pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:06 crc kubenswrapper[4848]: W0128 13:00:06.125729 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod021caff7_8415_451a_941e_20d025a0aa2b.slice/crio-87a849d313a3a0d78c26b6583c177fc5ff75db583f648e8fb70a90f7cbecd3b7 WatchSource:0}: Error finding container 87a849d313a3a0d78c26b6583c177fc5ff75db583f648e8fb70a90f7cbecd3b7: Status 404 returned error can't find the container with id 87a849d313a3a0d78c26b6583c177fc5ff75db583f648e8fb70a90f7cbecd3b7 Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.156235 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.346400 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8"] Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.348410 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn"] Jan 28 13:00:06 crc kubenswrapper[4848]: W0128 13:00:06.355442 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40955df6_8a58_487d_98fb_f8632536c72a.slice/crio-46f81c9edb662a16babb3d8fa405e5abf46d5fb644f15ed05f3fd1ea53455c94 WatchSource:0}: Error finding container 46f81c9edb662a16babb3d8fa405e5abf46d5fb644f15ed05f3fd1ea53455c94: Status 404 returned error can't find the container with id 46f81c9edb662a16babb3d8fa405e5abf46d5fb644f15ed05f3fd1ea53455c94 Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.498901 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" event={"ID":"40955df6-8a58-487d-98fb-f8632536c72a","Type":"ContainerStarted","Data":"46f81c9edb662a16babb3d8fa405e5abf46d5fb644f15ed05f3fd1ea53455c94"} Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.503688 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" event={"ID":"021caff7-8415-451a-941e-20d025a0aa2b","Type":"ContainerStarted","Data":"87a849d313a3a0d78c26b6583c177fc5ff75db583f648e8fb70a90f7cbecd3b7"} Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.529731 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" event={"ID":"25424d22-6211-41f8-9482-de5ca224224c","Type":"ContainerStarted","Data":"5cc639df77cc31d883cc01969d643946f2247cf9109df1b7d5cfcf60f12bae54"} Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.622397 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-lh2xv"] Jan 28 13:00:06 crc kubenswrapper[4848]: I0128 13:00:06.680659 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-hs6jb"] Jan 28 13:00:06 crc kubenswrapper[4848]: W0128 13:00:06.694625 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podec6c23a2_9920_4672_92c6_c44569e918d4.slice/crio-0e045fa454abab724c1801a5fd51f0a5ead8bbd5aa2fabc5a817e54f13633d30 WatchSource:0}: Error finding container 
0e045fa454abab724c1801a5fd51f0a5ead8bbd5aa2fabc5a817e54f13633d30: Status 404 returned error can't find the container with id 0e045fa454abab724c1801a5fd51f0a5ead8bbd5aa2fabc5a817e54f13633d30 Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.012358 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.198064 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1917adb5-e9d0-44e9-9176-afb51d9c0f30-secret-volume\") pod \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.198113 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1917adb5-e9d0-44e9-9176-afb51d9c0f30-config-volume\") pod \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.198166 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9svsw\" (UniqueName: \"kubernetes.io/projected/1917adb5-e9d0-44e9-9176-afb51d9c0f30-kube-api-access-9svsw\") pod \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\" (UID: \"1917adb5-e9d0-44e9-9176-afb51d9c0f30\") " Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.199377 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1917adb5-e9d0-44e9-9176-afb51d9c0f30-config-volume" (OuterVolumeSpecName: "config-volume") pod "1917adb5-e9d0-44e9-9176-afb51d9c0f30" (UID: "1917adb5-e9d0-44e9-9176-afb51d9c0f30"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.209549 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1917adb5-e9d0-44e9-9176-afb51d9c0f30-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1917adb5-e9d0-44e9-9176-afb51d9c0f30" (UID: "1917adb5-e9d0-44e9-9176-afb51d9c0f30"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.209585 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1917adb5-e9d0-44e9-9176-afb51d9c0f30-kube-api-access-9svsw" (OuterVolumeSpecName: "kube-api-access-9svsw") pod "1917adb5-e9d0-44e9-9176-afb51d9c0f30" (UID: "1917adb5-e9d0-44e9-9176-afb51d9c0f30"). InnerVolumeSpecName "kube-api-access-9svsw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.299858 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1917adb5-e9d0-44e9-9176-afb51d9c0f30-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.299904 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1917adb5-e9d0-44e9-9176-afb51d9c0f30-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.299918 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9svsw\" (UniqueName: \"kubernetes.io/projected/1917adb5-e9d0-44e9-9176-afb51d9c0f30-kube-api-access-9svsw\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.538021 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" event={"ID":"ff57a0c9-f0c9-4ba1-9166-37cb03178711","Type":"ContainerStarted","Data":"580efbe2e3648ba2023b72d3cb1e12f46a375b3a5a4eb8e397191023e6a224c8"} Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.539196 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" event={"ID":"ec6c23a2-9920-4672-92c6-c44569e918d4","Type":"ContainerStarted","Data":"0e045fa454abab724c1801a5fd51f0a5ead8bbd5aa2fabc5a817e54f13633d30"} Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.540686 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" event={"ID":"1917adb5-e9d0-44e9-9176-afb51d9c0f30","Type":"ContainerDied","Data":"356c70a4ab35258d1178b8d931164158b06a5e98e3b42af562391b6824c61d1e"} Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.540720 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="356c70a4ab35258d1178b8d931164158b06a5e98e3b42af562391b6824c61d1e" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.540776 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb" Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.925224 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:00:07 crc kubenswrapper[4848]: I0128 13:00:07.925354 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:00:10 crc kubenswrapper[4848]: I0128 13:00:10.057981 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q5wgd" Jan 28 13:00:10 crc kubenswrapper[4848]: I0128 13:00:10.123923 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q5wgd" Jan 28 13:00:12 crc kubenswrapper[4848]: I0128 13:00:12.438402 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q5wgd"] Jan 28 13:00:12 crc kubenswrapper[4848]: I0128 13:00:12.439816 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q5wgd" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="registry-server" containerID="cri-o://6f035c48e57c68c79be3ad61442cd08d5adca50597ea16644f6f416f25068230" gracePeriod=2 Jan 28 13:00:13 crc kubenswrapper[4848]: I0128 13:00:13.640884 4848 generic.go:334] "Generic (PLEG): container finished" podID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerID="6f035c48e57c68c79be3ad61442cd08d5adca50597ea16644f6f416f25068230" exitCode=0 Jan 28 13:00:13 crc kubenswrapper[4848]: I0128 13:00:13.640955 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerDied","Data":"6f035c48e57c68c79be3ad61442cd08d5adca50597ea16644f6f416f25068230"} Jan 28 13:00:14 crc kubenswrapper[4848]: I0128 13:00:14.946737 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q5wgd" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.034110 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pljzl\" (UniqueName: \"kubernetes.io/projected/0b15a664-642b-46b8-90c1-0d17a9cacd87-kube-api-access-pljzl\") pod \"0b15a664-642b-46b8-90c1-0d17a9cacd87\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.034548 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-catalog-content\") pod \"0b15a664-642b-46b8-90c1-0d17a9cacd87\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.034668 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-utilities\") pod \"0b15a664-642b-46b8-90c1-0d17a9cacd87\" (UID: \"0b15a664-642b-46b8-90c1-0d17a9cacd87\") " Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.035776 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-utilities" (OuterVolumeSpecName: "utilities") pod "0b15a664-642b-46b8-90c1-0d17a9cacd87" (UID: "0b15a664-642b-46b8-90c1-0d17a9cacd87"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.049212 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b15a664-642b-46b8-90c1-0d17a9cacd87-kube-api-access-pljzl" (OuterVolumeSpecName: "kube-api-access-pljzl") pod "0b15a664-642b-46b8-90c1-0d17a9cacd87" (UID: "0b15a664-642b-46b8-90c1-0d17a9cacd87"). InnerVolumeSpecName "kube-api-access-pljzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.136951 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.137002 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pljzl\" (UniqueName: \"kubernetes.io/projected/0b15a664-642b-46b8-90c1-0d17a9cacd87-kube-api-access-pljzl\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.197961 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b15a664-642b-46b8-90c1-0d17a9cacd87" (UID: "0b15a664-642b-46b8-90c1-0d17a9cacd87"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.238173 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b15a664-642b-46b8-90c1-0d17a9cacd87-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.684683 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q5wgd" event={"ID":"0b15a664-642b-46b8-90c1-0d17a9cacd87","Type":"ContainerDied","Data":"334205b021ebf63f8cef16df5e84ad5ee4ad186159ecb37935cce8f6e8eba0c8"} Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.684743 4848 scope.go:117] "RemoveContainer" containerID="6f035c48e57c68c79be3ad61442cd08d5adca50597ea16644f6f416f25068230" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.684865 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q5wgd" Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.741340 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q5wgd"] Jan 28 13:00:15 crc kubenswrapper[4848]: I0128 13:00:15.758701 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q5wgd"] Jan 28 13:00:16 crc kubenswrapper[4848]: I0128 13:00:16.857856 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" path="/var/lib/kubelet/pods/0b15a664-642b-46b8-90c1-0d17a9cacd87/volumes" Jan 28 13:00:20 crc kubenswrapper[4848]: I0128 13:00:20.952629 4848 scope.go:117] "RemoveContainer" containerID="75a5f1e41c1d19526fb65d4c9bfb280ba5c78852092bbe1ef8e864366098e20b" Jan 28 13:00:20 crc kubenswrapper[4848]: I0128 13:00:20.977346 4848 scope.go:117] "RemoveContainer" containerID="2a44714c4f1980475ba9eeb010c7c87b87a3bf15d47d456be6124b4153e7dc7f" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.732775 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" event={"ID":"021caff7-8415-451a-941e-20d025a0aa2b","Type":"ContainerStarted","Data":"45c0f2ba89aaa530e6ac1ea4e6eff5676312636ee89d70153b5bdfe9fa6548f1"} Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.734674 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" event={"ID":"ff57a0c9-f0c9-4ba1-9166-37cb03178711","Type":"ContainerStarted","Data":"0e99c64e0cec0a369cbe9df889a274e5cee31e0d79710229a8013f01786622b0"} Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.734852 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.737578 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" event={"ID":"25424d22-6211-41f8-9482-de5ca224224c","Type":"ContainerStarted","Data":"7d847658e2fba30e0a06ecf83bcaa14456cff3d75cbab9b6904603519280759f"} Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.739026 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" event={"ID":"ec6c23a2-9920-4672-92c6-c44569e918d4","Type":"ContainerStarted","Data":"18ddb048bdd4c07ec857720a02a6ad05b8d29c69a5049baee96226a5d26287ab"} Jan 28 13:00:21 crc kubenswrapper[4848]: 
I0128 13:00:21.739120 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.740842 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" event={"ID":"40955df6-8a58-487d-98fb-f8632536c72a","Type":"ContainerStarted","Data":"da593a4a8dd383ace50527b721813f5eb137a0f3b8491c651b20e4f42d7e51ff"} Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.770715 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.775039 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-pwsdh" podStartSLOduration=1.951783177 podStartE2EDuration="16.775012799s" podCreationTimestamp="2026-01-28 13:00:05 +0000 UTC" firstStartedPulling="2026-01-28 13:00:06.136699198 +0000 UTC m=+833.048916236" lastFinishedPulling="2026-01-28 13:00:20.95992882 +0000 UTC m=+847.872145858" observedRunningTime="2026-01-28 13:00:21.765078928 +0000 UTC m=+848.677295966" watchObservedRunningTime="2026-01-28 13:00:21.775012799 +0000 UTC m=+848.687229837" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.792330 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8" podStartSLOduration=2.142520229 podStartE2EDuration="16.792311073s" podCreationTimestamp="2026-01-28 13:00:05 +0000 UTC" firstStartedPulling="2026-01-28 13:00:06.364366739 +0000 UTC m=+833.276583777" lastFinishedPulling="2026-01-28 13:00:21.014157583 +0000 UTC m=+847.926374621" observedRunningTime="2026-01-28 13:00:21.791058068 +0000 UTC m=+848.703275106" watchObservedRunningTime="2026-01-28 13:00:21.792311073 +0000 UTC m=+848.704528111" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.824232 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn" podStartSLOduration=2.226611647 podStartE2EDuration="16.824201084s" podCreationTimestamp="2026-01-28 13:00:05 +0000 UTC" firstStartedPulling="2026-01-28 13:00:06.361329545 +0000 UTC m=+833.273546583" lastFinishedPulling="2026-01-28 13:00:20.958918982 +0000 UTC m=+847.871136020" observedRunningTime="2026-01-28 13:00:21.822770255 +0000 UTC m=+848.734987303" watchObservedRunningTime="2026-01-28 13:00:21.824201084 +0000 UTC m=+848.736418112" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.883997 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-hs6jb" podStartSLOduration=2.601025804 podStartE2EDuration="16.883972279s" podCreationTimestamp="2026-01-28 13:00:05 +0000 UTC" firstStartedPulling="2026-01-28 13:00:06.697566868 +0000 UTC m=+833.609783906" lastFinishedPulling="2026-01-28 13:00:20.980513343 +0000 UTC m=+847.892730381" observedRunningTime="2026-01-28 13:00:21.86279523 +0000 UTC m=+848.775012268" watchObservedRunningTime="2026-01-28 13:00:21.883972279 +0000 UTC m=+848.796189317" Jan 28 13:00:21 crc kubenswrapper[4848]: I0128 13:00:21.884138 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" 
podStartSLOduration=2.5813923020000002 podStartE2EDuration="16.884134123s" podCreationTimestamp="2026-01-28 13:00:05 +0000 UTC" firstStartedPulling="2026-01-28 13:00:06.656330746 +0000 UTC m=+833.568547784" lastFinishedPulling="2026-01-28 13:00:20.959072567 +0000 UTC m=+847.871289605" observedRunningTime="2026-01-28 13:00:21.880433162 +0000 UTC m=+848.792650200" watchObservedRunningTime="2026-01-28 13:00:21.884134123 +0000 UTC m=+848.796351161" Jan 28 13:00:26 crc kubenswrapper[4848]: I0128 13:00:26.159688 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-lh2xv" Jan 28 13:00:37 crc kubenswrapper[4848]: I0128 13:00:37.925105 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:00:37 crc kubenswrapper[4848]: I0128 13:00:37.926001 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:00:37 crc kubenswrapper[4848]: I0128 13:00:37.926067 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:00:37 crc kubenswrapper[4848]: I0128 13:00:37.926889 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b3d7bb96bb73c79bf1b8f4103851f2633f3719121e6446c4bbfd8ad6b1a1178"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:00:37 crc kubenswrapper[4848]: I0128 13:00:37.926961 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://9b3d7bb96bb73c79bf1b8f4103851f2633f3719121e6446c4bbfd8ad6b1a1178" gracePeriod=600 Jan 28 13:00:40 crc kubenswrapper[4848]: I0128 13:00:40.875716 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="9b3d7bb96bb73c79bf1b8f4103851f2633f3719121e6446c4bbfd8ad6b1a1178" exitCode=0 Jan 28 13:00:40 crc kubenswrapper[4848]: I0128 13:00:40.875811 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"9b3d7bb96bb73c79bf1b8f4103851f2633f3719121e6446c4bbfd8ad6b1a1178"} Jan 28 13:00:40 crc kubenswrapper[4848]: I0128 13:00:40.876839 4848 scope.go:117] "RemoveContainer" containerID="6e933c1f02c6d3091e38630936db38332fbcb3f6fc3c2d979f20dc55ece773e3" Jan 28 13:00:41 crc kubenswrapper[4848]: I0128 13:00:41.886391 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"875a982e7db5cc44931d699a4c51480a5860a252ccb155a317028cb1da4c99e1"} Jan 28 13:00:44 crc 
kubenswrapper[4848]: I0128 13:00:44.707707 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6"] Jan 28 13:00:44 crc kubenswrapper[4848]: E0128 13:00:44.708360 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="extract-utilities" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.708378 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="extract-utilities" Jan 28 13:00:44 crc kubenswrapper[4848]: E0128 13:00:44.708398 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1917adb5-e9d0-44e9-9176-afb51d9c0f30" containerName="collect-profiles" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.708405 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="1917adb5-e9d0-44e9-9176-afb51d9c0f30" containerName="collect-profiles" Jan 28 13:00:44 crc kubenswrapper[4848]: E0128 13:00:44.708420 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="extract-content" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.708431 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="extract-content" Jan 28 13:00:44 crc kubenswrapper[4848]: E0128 13:00:44.708446 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="registry-server" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.708453 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="registry-server" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.708575 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b15a664-642b-46b8-90c1-0d17a9cacd87" containerName="registry-server" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.708596 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="1917adb5-e9d0-44e9-9176-afb51d9c0f30" containerName="collect-profiles" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.709630 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.712891 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.723606 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6"] Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.781072 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkzqh\" (UniqueName: \"kubernetes.io/projected/50d9a50c-8ae1-4157-8c81-c32ba250030b-kube-api-access-mkzqh\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.781141 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.781397 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.882873 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.882947 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkzqh\" (UniqueName: \"kubernetes.io/projected/50d9a50c-8ae1-4157-8c81-c32ba250030b-kube-api-access-mkzqh\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.883010 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.883684 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.883718 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:44 crc kubenswrapper[4848]: I0128 13:00:44.909368 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkzqh\" (UniqueName: \"kubernetes.io/projected/50d9a50c-8ae1-4157-8c81-c32ba250030b-kube-api-access-mkzqh\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:45 crc kubenswrapper[4848]: I0128 13:00:45.026273 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:45 crc kubenswrapper[4848]: I0128 13:00:45.270928 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6"] Jan 28 13:00:45 crc kubenswrapper[4848]: I0128 13:00:45.914189 4848 generic.go:334] "Generic (PLEG): container finished" podID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerID="07facc967ce914a599fe97896970b21db2784bcea631b6eee36e20136d4bdca0" exitCode=0 Jan 28 13:00:45 crc kubenswrapper[4848]: I0128 13:00:45.914311 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" event={"ID":"50d9a50c-8ae1-4157-8c81-c32ba250030b","Type":"ContainerDied","Data":"07facc967ce914a599fe97896970b21db2784bcea631b6eee36e20136d4bdca0"} Jan 28 13:00:45 crc kubenswrapper[4848]: I0128 13:00:45.916223 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" event={"ID":"50d9a50c-8ae1-4157-8c81-c32ba250030b","Type":"ContainerStarted","Data":"a6304a9a29dfcae1525c29a979a1cc8b0aee988cbe9aebe83d67c50ac191b369"} Jan 28 13:00:48 crc kubenswrapper[4848]: I0128 13:00:48.938691 4848 generic.go:334] "Generic (PLEG): container finished" podID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerID="04abe0843e945cb4495084307571c34ce8f4403b3e6b6dc93ee9f84ba46824f7" exitCode=0 Jan 28 13:00:48 crc kubenswrapper[4848]: I0128 13:00:48.938917 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" event={"ID":"50d9a50c-8ae1-4157-8c81-c32ba250030b","Type":"ContainerDied","Data":"04abe0843e945cb4495084307571c34ce8f4403b3e6b6dc93ee9f84ba46824f7"} Jan 28 13:00:49 crc kubenswrapper[4848]: I0128 13:00:49.950582 4848 generic.go:334] "Generic (PLEG): container finished" podID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerID="7d63a78ddedc3df1774ffefd16f5f7bb0626f80bed63a82c54672ca9d9b50ca8" exitCode=0 Jan 28 13:00:49 crc kubenswrapper[4848]: I0128 
13:00:49.950652 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" event={"ID":"50d9a50c-8ae1-4157-8c81-c32ba250030b","Type":"ContainerDied","Data":"7d63a78ddedc3df1774ffefd16f5f7bb0626f80bed63a82c54672ca9d9b50ca8"} Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.286750 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.392819 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkzqh\" (UniqueName: \"kubernetes.io/projected/50d9a50c-8ae1-4157-8c81-c32ba250030b-kube-api-access-mkzqh\") pod \"50d9a50c-8ae1-4157-8c81-c32ba250030b\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.393319 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-util\") pod \"50d9a50c-8ae1-4157-8c81-c32ba250030b\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.393389 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-bundle\") pod \"50d9a50c-8ae1-4157-8c81-c32ba250030b\" (UID: \"50d9a50c-8ae1-4157-8c81-c32ba250030b\") " Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.394501 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-bundle" (OuterVolumeSpecName: "bundle") pod "50d9a50c-8ae1-4157-8c81-c32ba250030b" (UID: "50d9a50c-8ae1-4157-8c81-c32ba250030b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.401660 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50d9a50c-8ae1-4157-8c81-c32ba250030b-kube-api-access-mkzqh" (OuterVolumeSpecName: "kube-api-access-mkzqh") pod "50d9a50c-8ae1-4157-8c81-c32ba250030b" (UID: "50d9a50c-8ae1-4157-8c81-c32ba250030b"). InnerVolumeSpecName "kube-api-access-mkzqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.406529 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-util" (OuterVolumeSpecName: "util") pod "50d9a50c-8ae1-4157-8c81-c32ba250030b" (UID: "50d9a50c-8ae1-4157-8c81-c32ba250030b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.495076 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkzqh\" (UniqueName: \"kubernetes.io/projected/50d9a50c-8ae1-4157-8c81-c32ba250030b-kube-api-access-mkzqh\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.495132 4848 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-util\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.495152 4848 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/50d9a50c-8ae1-4157-8c81-c32ba250030b-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.971664 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" event={"ID":"50d9a50c-8ae1-4157-8c81-c32ba250030b","Type":"ContainerDied","Data":"a6304a9a29dfcae1525c29a979a1cc8b0aee988cbe9aebe83d67c50ac191b369"} Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.971728 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6304a9a29dfcae1525c29a979a1cc8b0aee988cbe9aebe83d67c50ac191b369" Jan 28 13:00:51 crc kubenswrapper[4848]: I0128 13:00:51.971758 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.037950 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-rbzxj"] Jan 28 13:00:54 crc kubenswrapper[4848]: E0128 13:00:54.038317 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="util" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.038339 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="util" Jan 28 13:00:54 crc kubenswrapper[4848]: E0128 13:00:54.038354 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="pull" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.038362 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="pull" Jan 28 13:00:54 crc kubenswrapper[4848]: E0128 13:00:54.038371 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="extract" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.038379 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="extract" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.038506 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="50d9a50c-8ae1-4157-8c81-c32ba250030b" containerName="extract" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.039162 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.043945 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.044325 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-7kbcf" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.056423 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.077291 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-rbzxj"] Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.177718 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpb4h\" (UniqueName: \"kubernetes.io/projected/829bde53-8549-411f-a1ff-a00769198b1c-kube-api-access-fpb4h\") pod \"nmstate-operator-646758c888-rbzxj\" (UID: \"829bde53-8549-411f-a1ff-a00769198b1c\") " pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.279233 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpb4h\" (UniqueName: \"kubernetes.io/projected/829bde53-8549-411f-a1ff-a00769198b1c-kube-api-access-fpb4h\") pod \"nmstate-operator-646758c888-rbzxj\" (UID: \"829bde53-8549-411f-a1ff-a00769198b1c\") " pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.299112 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpb4h\" (UniqueName: \"kubernetes.io/projected/829bde53-8549-411f-a1ff-a00769198b1c-kube-api-access-fpb4h\") pod \"nmstate-operator-646758c888-rbzxj\" (UID: \"829bde53-8549-411f-a1ff-a00769198b1c\") " pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.369778 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.710039 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-rbzxj"] Jan 28 13:00:54 crc kubenswrapper[4848]: I0128 13:00:54.992272 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" event={"ID":"829bde53-8549-411f-a1ff-a00769198b1c","Type":"ContainerStarted","Data":"5ec60f3630952d8ce2db9a8adf02b081e59ab02f24d9dcd4f05eaa2822ddac1f"} Jan 28 13:00:58 crc kubenswrapper[4848]: I0128 13:00:58.012287 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" event={"ID":"829bde53-8549-411f-a1ff-a00769198b1c","Type":"ContainerStarted","Data":"23cfde4ed23c0da84ba7b378a6bb7758440d49cf4fa5bf9504ac066b4b1e3132"} Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.083419 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-rbzxj" podStartSLOduration=2.780761107 podStartE2EDuration="5.083389895s" podCreationTimestamp="2026-01-28 13:00:54 +0000 UTC" firstStartedPulling="2026-01-28 13:00:54.713234329 +0000 UTC m=+881.625451357" lastFinishedPulling="2026-01-28 13:00:57.015863107 +0000 UTC m=+883.928080145" observedRunningTime="2026-01-28 13:00:58.035478919 +0000 UTC m=+884.947695957" watchObservedRunningTime="2026-01-28 13:00:59.083389895 +0000 UTC m=+885.995606933" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.084564 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-7gp2j"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.085962 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.095226 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-8t8mv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.098227 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-7gp2j"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.107589 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-w28lf"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.108706 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.126040 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.127186 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.136391 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.140769 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266601 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65rdg\" (UniqueName: \"kubernetes.io/projected/88ba0124-029f-4b9c-8479-2ee4c089bcbb-kube-api-access-65rdg\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266679 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/38d465b1-a9c1-4007-8406-9fd77ec0ead4-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266706 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwvt4\" (UniqueName: \"kubernetes.io/projected/b67bb5ba-7747-475f-a3c5-de2b7df72934-kube-api-access-wwvt4\") pod \"nmstate-metrics-54757c584b-7gp2j\" (UID: \"b67bb5ba-7747-475f-a3c5-de2b7df72934\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266755 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-dbus-socket\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266780 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2m7b\" (UniqueName: \"kubernetes.io/projected/38d465b1-a9c1-4007-8406-9fd77ec0ead4-kube-api-access-d2m7b\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266886 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-nmstate-lock\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.266935 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-ovs-socket\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.270852 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb"] Jan 28 13:00:59 crc kubenswrapper[4848]: 
I0128 13:00:59.271720 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.280569 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.280854 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.281220 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.281394 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-q8fqh" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368212 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-dbus-socket\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368300 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2m7b\" (UniqueName: \"kubernetes.io/projected/38d465b1-a9c1-4007-8406-9fd77ec0ead4-kube-api-access-d2m7b\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368330 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-ovs-socket\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368361 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-nmstate-lock\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368399 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65rdg\" (UniqueName: \"kubernetes.io/projected/88ba0124-029f-4b9c-8479-2ee4c089bcbb-kube-api-access-65rdg\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368440 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/38d465b1-a9c1-4007-8406-9fd77ec0ead4-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368458 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwvt4\" (UniqueName: \"kubernetes.io/projected/b67bb5ba-7747-475f-a3c5-de2b7df72934-kube-api-access-wwvt4\") pod \"nmstate-metrics-54757c584b-7gp2j\" (UID: 
\"b67bb5ba-7747-475f-a3c5-de2b7df72934\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368463 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-ovs-socket\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368575 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-nmstate-lock\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.368716 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/88ba0124-029f-4b9c-8479-2ee4c089bcbb-dbus-socket\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: E0128 13:00:59.368790 4848 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 28 13:00:59 crc kubenswrapper[4848]: E0128 13:00:59.368941 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/38d465b1-a9c1-4007-8406-9fd77ec0ead4-tls-key-pair podName:38d465b1-a9c1-4007-8406-9fd77ec0ead4 nodeName:}" failed. No retries permitted until 2026-01-28 13:00:59.868901503 +0000 UTC m=+886.781118701 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/38d465b1-a9c1-4007-8406-9fd77ec0ead4-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-52stv" (UID: "38d465b1-a9c1-4007-8406-9fd77ec0ead4") : secret "openshift-nmstate-webhook" not found Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.394447 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65rdg\" (UniqueName: \"kubernetes.io/projected/88ba0124-029f-4b9c-8479-2ee4c089bcbb-kube-api-access-65rdg\") pod \"nmstate-handler-w28lf\" (UID: \"88ba0124-029f-4b9c-8479-2ee4c089bcbb\") " pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.397766 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2m7b\" (UniqueName: \"kubernetes.io/projected/38d465b1-a9c1-4007-8406-9fd77ec0ead4-kube-api-access-d2m7b\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.400348 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwvt4\" (UniqueName: \"kubernetes.io/projected/b67bb5ba-7747-475f-a3c5-de2b7df72934-kube-api-access-wwvt4\") pod \"nmstate-metrics-54757c584b-7gp2j\" (UID: \"b67bb5ba-7747-475f-a3c5-de2b7df72934\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.405191 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.437791 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.469937 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae62f49e-2ce4-4e48-b803-2b46a5319273-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.470077 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2cml\" (UniqueName: \"kubernetes.io/projected/ae62f49e-2ce4-4e48-b803-2b46a5319273-kube-api-access-r2cml\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.470104 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ae62f49e-2ce4-4e48-b803-2b46a5319273-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.535181 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-56868ccb7c-7j5qq"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.536633 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.571745 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae62f49e-2ce4-4e48-b803-2b46a5319273-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.571868 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2cml\" (UniqueName: \"kubernetes.io/projected/ae62f49e-2ce4-4e48-b803-2b46a5319273-kube-api-access-r2cml\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.571895 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ae62f49e-2ce4-4e48-b803-2b46a5319273-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.573023 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ae62f49e-2ce4-4e48-b803-2b46a5319273-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: E0128 13:00:59.573134 4848 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 28 13:00:59 crc kubenswrapper[4848]: E0128 13:00:59.573186 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae62f49e-2ce4-4e48-b803-2b46a5319273-plugin-serving-cert podName:ae62f49e-2ce4-4e48-b803-2b46a5319273 nodeName:}" failed. No retries permitted until 2026-01-28 13:01:00.073170849 +0000 UTC m=+886.985387887 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/ae62f49e-2ce4-4e48-b803-2b46a5319273-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-76xvb" (UID: "ae62f49e-2ce4-4e48-b803-2b46a5319273") : secret "plugin-serving-cert" not found Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.575620 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-56868ccb7c-7j5qq"] Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.616700 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2cml\" (UniqueName: \"kubernetes.io/projected/ae62f49e-2ce4-4e48-b803-2b46a5319273-kube-api-access-r2cml\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673163 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv5sf\" (UniqueName: \"kubernetes.io/projected/074146bb-a996-4ea7-94f8-17f5ae959cc4-kube-api-access-rv5sf\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673209 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-config\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673229 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-trusted-ca-bundle\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673267 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-oauth-config\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673294 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-service-ca\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673315 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-serving-cert\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.673340 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" 
(UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-oauth-serving-cert\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775187 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-oauth-serving-cert\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775356 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv5sf\" (UniqueName: \"kubernetes.io/projected/074146bb-a996-4ea7-94f8-17f5ae959cc4-kube-api-access-rv5sf\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775387 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-config\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775412 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-trusted-ca-bundle\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775438 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-oauth-config\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775474 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-service-ca\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.775507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-serving-cert\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.776638 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-config\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.776753 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-service-ca\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.777582 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-oauth-serving-cert\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.778834 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/074146bb-a996-4ea7-94f8-17f5ae959cc4-trusted-ca-bundle\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.779899 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-serving-cert\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.781326 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/074146bb-a996-4ea7-94f8-17f5ae959cc4-console-oauth-config\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.792291 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv5sf\" (UniqueName: \"kubernetes.io/projected/074146bb-a996-4ea7-94f8-17f5ae959cc4-kube-api-access-rv5sf\") pod \"console-56868ccb7c-7j5qq\" (UID: \"074146bb-a996-4ea7-94f8-17f5ae959cc4\") " pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.875074 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.878509 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/38d465b1-a9c1-4007-8406-9fd77ec0ead4-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.882086 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/38d465b1-a9c1-4007-8406-9fd77ec0ead4-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-52stv\" (UID: \"38d465b1-a9c1-4007-8406-9fd77ec0ead4\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:00:59 crc kubenswrapper[4848]: I0128 13:00:59.930806 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-7gp2j"] Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.030115 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-w28lf" event={"ID":"88ba0124-029f-4b9c-8479-2ee4c089bcbb","Type":"ContainerStarted","Data":"93c18772ee87a45a4d0270a85354dd8559429311415c32eaf017a8dab3e8f558"} Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.035506 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" event={"ID":"b67bb5ba-7747-475f-a3c5-de2b7df72934","Type":"ContainerStarted","Data":"2f2eddbcb8864aa459aab437907203b904a46b9664c80a79ca9ce7aedac2a44a"} Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.051944 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.086503 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae62f49e-2ce4-4e48-b803-2b46a5319273-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.094410 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae62f49e-2ce4-4e48-b803-2b46a5319273-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-76xvb\" (UID: \"ae62f49e-2ce4-4e48-b803-2b46a5319273\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.135930 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-56868ccb7c-7j5qq"] Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.216704 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.431048 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv"] Jan 28 13:01:00 crc kubenswrapper[4848]: I0128 13:01:00.707805 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb"] Jan 28 13:01:00 crc kubenswrapper[4848]: W0128 13:01:00.743140 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae62f49e_2ce4_4e48_b803_2b46a5319273.slice/crio-a988432114d5c5278a5a0d8fa03e562a4bb882d7f6b48171c80b94bb57504662 WatchSource:0}: Error finding container a988432114d5c5278a5a0d8fa03e562a4bb882d7f6b48171c80b94bb57504662: Status 404 returned error can't find the container with id a988432114d5c5278a5a0d8fa03e562a4bb882d7f6b48171c80b94bb57504662 Jan 28 13:01:01 crc kubenswrapper[4848]: I0128 13:01:01.047342 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56868ccb7c-7j5qq" event={"ID":"074146bb-a996-4ea7-94f8-17f5ae959cc4","Type":"ContainerStarted","Data":"89ba98edd7fc44db8f65f12f81678d6d55e2d7205e155837d3bcb77fc6d222ca"} Jan 28 13:01:01 crc kubenswrapper[4848]: I0128 13:01:01.047400 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56868ccb7c-7j5qq" event={"ID":"074146bb-a996-4ea7-94f8-17f5ae959cc4","Type":"ContainerStarted","Data":"a8f19cec5feecfbb7d6049490a12f43cc32bd6a42b975f4edbda31ed41250075"} Jan 28 13:01:01 crc kubenswrapper[4848]: I0128 13:01:01.052442 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" event={"ID":"ae62f49e-2ce4-4e48-b803-2b46a5319273","Type":"ContainerStarted","Data":"a988432114d5c5278a5a0d8fa03e562a4bb882d7f6b48171c80b94bb57504662"} Jan 28 13:01:01 crc kubenswrapper[4848]: I0128 13:01:01.053839 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" event={"ID":"38d465b1-a9c1-4007-8406-9fd77ec0ead4","Type":"ContainerStarted","Data":"da5d93796a18530dcbf734dd9f7c9988969d19e678522301ccbb9acce78a7611"} Jan 28 13:01:01 crc kubenswrapper[4848]: I0128 13:01:01.070364 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-56868ccb7c-7j5qq" podStartSLOduration=2.07031282 podStartE2EDuration="2.07031282s" podCreationTimestamp="2026-01-28 13:00:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:01:01.06921156 +0000 UTC m=+887.981428618" watchObservedRunningTime="2026-01-28 13:01:01.07031282 +0000 UTC m=+887.982529858" Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.080869 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-w28lf" event={"ID":"88ba0124-029f-4b9c-8479-2ee4c089bcbb","Type":"ContainerStarted","Data":"70fddad54de80949bf1e20990d2b9ebcb5061e49d782f71cfd04a7780eb8b960"} Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.081375 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.085983 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" 
event={"ID":"b67bb5ba-7747-475f-a3c5-de2b7df72934","Type":"ContainerStarted","Data":"58c26dc9885c1af8bd83214973bade5660d6ad99b44d2197205d3820158e814a"} Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.098760 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" event={"ID":"38d465b1-a9c1-4007-8406-9fd77ec0ead4","Type":"ContainerStarted","Data":"4b5a35c114215505f67aa893cd2781783dd359b63a5a04fa506abd43f948d2c8"} Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.098973 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.102641 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-w28lf" podStartSLOduration=0.957544961 podStartE2EDuration="4.102616485s" podCreationTimestamp="2026-01-28 13:00:59 +0000 UTC" firstStartedPulling="2026-01-28 13:00:59.473932316 +0000 UTC m=+886.386149354" lastFinishedPulling="2026-01-28 13:01:02.61900384 +0000 UTC m=+889.531220878" observedRunningTime="2026-01-28 13:01:03.098840002 +0000 UTC m=+890.011057030" watchObservedRunningTime="2026-01-28 13:01:03.102616485 +0000 UTC m=+890.014833523" Jan 28 13:01:03 crc kubenswrapper[4848]: I0128 13:01:03.125609 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" podStartSLOduration=1.9637688660000001 podStartE2EDuration="4.125584013s" podCreationTimestamp="2026-01-28 13:00:59 +0000 UTC" firstStartedPulling="2026-01-28 13:01:00.473591812 +0000 UTC m=+887.385808850" lastFinishedPulling="2026-01-28 13:01:02.635406959 +0000 UTC m=+889.547623997" observedRunningTime="2026-01-28 13:01:03.117974205 +0000 UTC m=+890.030191253" watchObservedRunningTime="2026-01-28 13:01:03.125584013 +0000 UTC m=+890.037801051" Jan 28 13:01:05 crc kubenswrapper[4848]: I0128 13:01:05.129051 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" event={"ID":"ae62f49e-2ce4-4e48-b803-2b46a5319273","Type":"ContainerStarted","Data":"15f52f462f031a21797937859f213f70ae170e81c766c9bff13d09b760ce183e"} Jan 28 13:01:05 crc kubenswrapper[4848]: I0128 13:01:05.147712 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-76xvb" podStartSLOduration=2.8989677499999997 podStartE2EDuration="6.14768407s" podCreationTimestamp="2026-01-28 13:00:59 +0000 UTC" firstStartedPulling="2026-01-28 13:01:00.746896976 +0000 UTC m=+887.659114024" lastFinishedPulling="2026-01-28 13:01:03.995613296 +0000 UTC m=+890.907830344" observedRunningTime="2026-01-28 13:01:05.147118125 +0000 UTC m=+892.059335163" watchObservedRunningTime="2026-01-28 13:01:05.14768407 +0000 UTC m=+892.059901108" Jan 28 13:01:07 crc kubenswrapper[4848]: I0128 13:01:07.146307 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" event={"ID":"b67bb5ba-7747-475f-a3c5-de2b7df72934","Type":"ContainerStarted","Data":"47873bb00414d8adfb6d609ca3179282335643fe3f2e57c875278685fcf46ab9"} Jan 28 13:01:07 crc kubenswrapper[4848]: I0128 13:01:07.169447 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-7gp2j" podStartSLOduration=1.658854949 podStartE2EDuration="8.169417457s" podCreationTimestamp="2026-01-28 13:00:59 +0000 UTC" 
firstStartedPulling="2026-01-28 13:00:59.931089817 +0000 UTC m=+886.843306855" lastFinishedPulling="2026-01-28 13:01:06.441652315 +0000 UTC m=+893.353869363" observedRunningTime="2026-01-28 13:01:07.164177023 +0000 UTC m=+894.076394061" watchObservedRunningTime="2026-01-28 13:01:07.169417457 +0000 UTC m=+894.081634495" Jan 28 13:01:09 crc kubenswrapper[4848]: I0128 13:01:09.473777 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-w28lf" Jan 28 13:01:09 crc kubenswrapper[4848]: I0128 13:01:09.875489 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:01:09 crc kubenswrapper[4848]: I0128 13:01:09.875583 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:01:09 crc kubenswrapper[4848]: I0128 13:01:09.880786 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:01:10 crc kubenswrapper[4848]: I0128 13:01:10.172746 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-56868ccb7c-7j5qq" Jan 28 13:01:10 crc kubenswrapper[4848]: I0128 13:01:10.250855 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5dbnv"] Jan 28 13:01:20 crc kubenswrapper[4848]: I0128 13:01:20.061096 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-52stv" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.799625 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d"] Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.801500 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.804069 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.811793 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d"] Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.849473 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2wrw\" (UniqueName: \"kubernetes.io/projected/8a8f5de6-d418-43d7-855b-c4773b3dc691-kube-api-access-c2wrw\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.849537 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.849612 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.951550 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2wrw\" (UniqueName: \"kubernetes.io/projected/8a8f5de6-d418-43d7-855b-c4773b3dc691-kube-api-access-c2wrw\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.951646 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.951754 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.952292 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.952323 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:34 crc kubenswrapper[4848]: I0128 13:01:34.997345 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2wrw\" (UniqueName: \"kubernetes.io/projected/8a8f5de6-d418-43d7-855b-c4773b3dc691-kube-api-access-c2wrw\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:35 crc kubenswrapper[4848]: I0128 13:01:35.119568 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:35 crc kubenswrapper[4848]: I0128 13:01:35.299858 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5dbnv" podUID="c3bd5c0e-2656-4237-a9ab-e4de84101595" containerName="console" containerID="cri-o://67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521" gracePeriod=15 Jan 28 13:01:35 crc kubenswrapper[4848]: I0128 13:01:35.644152 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d"] Jan 28 13:01:35 crc kubenswrapper[4848]: I0128 13:01:35.959838 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5dbnv_c3bd5c0e-2656-4237-a9ab-e4de84101595/console/0.log" Jan 28 13:01:35 crc kubenswrapper[4848]: I0128 13:01:35.960401 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.077612 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-service-ca\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.077693 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-trusted-ca-bundle\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.077765 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-serving-cert\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.077820 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5q52\" (UniqueName: \"kubernetes.io/projected/c3bd5c0e-2656-4237-a9ab-e4de84101595-kube-api-access-f5q52\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.077837 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-config\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.078802 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-service-ca" (OuterVolumeSpecName: "service-ca") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.078943 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-config" (OuterVolumeSpecName: "console-config") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.079165 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-oauth-serving-cert\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.079241 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-oauth-config\") pod \"c3bd5c0e-2656-4237-a9ab-e4de84101595\" (UID: \"c3bd5c0e-2656-4237-a9ab-e4de84101595\") " Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.079536 4848 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.079551 4848 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.079630 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.079816 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.085182 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3bd5c0e-2656-4237-a9ab-e4de84101595-kube-api-access-f5q52" (OuterVolumeSpecName: "kube-api-access-f5q52") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "kube-api-access-f5q52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.085662 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.085697 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "c3bd5c0e-2656-4237-a9ab-e4de84101595" (UID: "c3bd5c0e-2656-4237-a9ab-e4de84101595"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.180351 4848 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.180794 4848 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.180806 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5q52\" (UniqueName: \"kubernetes.io/projected/c3bd5c0e-2656-4237-a9ab-e4de84101595-kube-api-access-f5q52\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.180818 4848 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c3bd5c0e-2656-4237-a9ab-e4de84101595-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.180829 4848 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c3bd5c0e-2656-4237-a9ab-e4de84101595-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:36 crc kubenswrapper[4848]: E0128 13:01:36.256480 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a8f5de6_d418_43d7_855b_c4773b3dc691.slice/crio-conmon-3eb7061c7a00d0006cfa8d1b5df1b1bfccb242744dbbda37a3c66714b4d2f76d.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.359956 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5dbnv_c3bd5c0e-2656-4237-a9ab-e4de84101595/console/0.log" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.360018 4848 generic.go:334] "Generic (PLEG): container finished" podID="c3bd5c0e-2656-4237-a9ab-e4de84101595" containerID="67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521" exitCode=2 Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.360097 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5dbnv" event={"ID":"c3bd5c0e-2656-4237-a9ab-e4de84101595","Type":"ContainerDied","Data":"67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521"} Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.360135 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5dbnv" event={"ID":"c3bd5c0e-2656-4237-a9ab-e4de84101595","Type":"ContainerDied","Data":"3f260dcacec5987142fccfdcca394c9fa6390b46bc7ef4e9346fbff371371653"} Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.360156 4848 scope.go:117] "RemoveContainer" containerID="67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.360188 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5dbnv" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.362142 4848 generic.go:334] "Generic (PLEG): container finished" podID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerID="3eb7061c7a00d0006cfa8d1b5df1b1bfccb242744dbbda37a3c66714b4d2f76d" exitCode=0 Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.362193 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" event={"ID":"8a8f5de6-d418-43d7-855b-c4773b3dc691","Type":"ContainerDied","Data":"3eb7061c7a00d0006cfa8d1b5df1b1bfccb242744dbbda37a3c66714b4d2f76d"} Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.362231 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" event={"ID":"8a8f5de6-d418-43d7-855b-c4773b3dc691","Type":"ContainerStarted","Data":"0a57bd47814aed85f0fc0820de333f810bc5c2deb4209e878a00b0888fea3aae"} Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.410380 4848 scope.go:117] "RemoveContainer" containerID="67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521" Jan 28 13:01:36 crc kubenswrapper[4848]: E0128 13:01:36.411026 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521\": container with ID starting with 67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521 not found: ID does not exist" containerID="67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.411091 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521"} err="failed to get container status \"67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521\": rpc error: code = NotFound desc = could not find container \"67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521\": container with ID starting with 67582c9897053602a13d85ee795c178856fa7872eba9fe32b5f92e90e7306521 not found: ID does not exist" Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.413607 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5dbnv"] Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.420902 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5dbnv"] Jan 28 13:01:36 crc kubenswrapper[4848]: I0128 13:01:36.859985 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3bd5c0e-2656-4237-a9ab-e4de84101595" path="/var/lib/kubelet/pods/c3bd5c0e-2656-4237-a9ab-e4de84101595/volumes" Jan 28 13:01:38 crc kubenswrapper[4848]: I0128 13:01:38.380835 4848 generic.go:334] "Generic (PLEG): container finished" podID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerID="600d8c6b1a5c18e0cc9acdb76538fce3f7643c90ee34c781aea851c778e60e2b" exitCode=0 Jan 28 13:01:38 crc kubenswrapper[4848]: I0128 13:01:38.381020 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" event={"ID":"8a8f5de6-d418-43d7-855b-c4773b3dc691","Type":"ContainerDied","Data":"600d8c6b1a5c18e0cc9acdb76538fce3f7643c90ee34c781aea851c778e60e2b"} Jan 28 13:01:39 crc kubenswrapper[4848]: I0128 13:01:39.393769 4848 
generic.go:334] "Generic (PLEG): container finished" podID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerID="6769efbd6d8d0759ec7c7ff111f0da133d246de52dae3d31bc718a41c5ed5c95" exitCode=0 Jan 28 13:01:39 crc kubenswrapper[4848]: I0128 13:01:39.393841 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" event={"ID":"8a8f5de6-d418-43d7-855b-c4773b3dc691","Type":"ContainerDied","Data":"6769efbd6d8d0759ec7c7ff111f0da133d246de52dae3d31bc718a41c5ed5c95"} Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.688168 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.755009 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2wrw\" (UniqueName: \"kubernetes.io/projected/8a8f5de6-d418-43d7-855b-c4773b3dc691-kube-api-access-c2wrw\") pod \"8a8f5de6-d418-43d7-855b-c4773b3dc691\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.755124 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-util\") pod \"8a8f5de6-d418-43d7-855b-c4773b3dc691\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.755293 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-bundle\") pod \"8a8f5de6-d418-43d7-855b-c4773b3dc691\" (UID: \"8a8f5de6-d418-43d7-855b-c4773b3dc691\") " Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.756974 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-bundle" (OuterVolumeSpecName: "bundle") pod "8a8f5de6-d418-43d7-855b-c4773b3dc691" (UID: "8a8f5de6-d418-43d7-855b-c4773b3dc691"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.766510 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a8f5de6-d418-43d7-855b-c4773b3dc691-kube-api-access-c2wrw" (OuterVolumeSpecName: "kube-api-access-c2wrw") pod "8a8f5de6-d418-43d7-855b-c4773b3dc691" (UID: "8a8f5de6-d418-43d7-855b-c4773b3dc691"). InnerVolumeSpecName "kube-api-access-c2wrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.777139 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-util" (OuterVolumeSpecName: "util") pod "8a8f5de6-d418-43d7-855b-c4773b3dc691" (UID: "8a8f5de6-d418-43d7-855b-c4773b3dc691"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.856894 4848 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.857051 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2wrw\" (UniqueName: \"kubernetes.io/projected/8a8f5de6-d418-43d7-855b-c4773b3dc691-kube-api-access-c2wrw\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:40 crc kubenswrapper[4848]: I0128 13:01:40.857066 4848 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a8f5de6-d418-43d7-855b-c4773b3dc691-util\") on node \"crc\" DevicePath \"\"" Jan 28 13:01:41 crc kubenswrapper[4848]: I0128 13:01:41.409897 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" event={"ID":"8a8f5de6-d418-43d7-855b-c4773b3dc691","Type":"ContainerDied","Data":"0a57bd47814aed85f0fc0820de333f810bc5c2deb4209e878a00b0888fea3aae"} Jan 28 13:01:41 crc kubenswrapper[4848]: I0128 13:01:41.409963 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a57bd47814aed85f0fc0820de333f810bc5c2deb4209e878a00b0888fea3aae" Jan 28 13:01:41 crc kubenswrapper[4848]: I0128 13:01:41.410038 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.860876 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq"] Jan 28 13:01:49 crc kubenswrapper[4848]: E0128 13:01:49.861771 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="pull" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.861788 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="pull" Jan 28 13:01:49 crc kubenswrapper[4848]: E0128 13:01:49.861806 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="extract" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.861812 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="extract" Jan 28 13:01:49 crc kubenswrapper[4848]: E0128 13:01:49.861821 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="util" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.861826 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="util" Jan 28 13:01:49 crc kubenswrapper[4848]: E0128 13:01:49.861837 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3bd5c0e-2656-4237-a9ab-e4de84101595" containerName="console" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.861843 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3bd5c0e-2656-4237-a9ab-e4de84101595" containerName="console" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.861946 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3bd5c0e-2656-4237-a9ab-e4de84101595" containerName="console" Jan 
28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.861962 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a8f5de6-d418-43d7-855b-c4773b3dc691" containerName="extract" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.862403 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.864768 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.864826 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.865317 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.865754 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-sgc66" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.865967 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.880909 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq"] Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.890069 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-apiservice-cert\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.890151 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br5tc\" (UniqueName: \"kubernetes.io/projected/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-kube-api-access-br5tc\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.890285 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-webhook-cert\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.992105 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-webhook-cert\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.992213 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-apiservice-cert\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:49 crc kubenswrapper[4848]: I0128 13:01:49.992271 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br5tc\" (UniqueName: \"kubernetes.io/projected/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-kube-api-access-br5tc\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.001535 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-apiservice-cert\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.009219 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-webhook-cert\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.011322 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br5tc\" (UniqueName: \"kubernetes.io/projected/1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b-kube-api-access-br5tc\") pod \"metallb-operator-controller-manager-767fd6bd7f-8fzzq\" (UID: \"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b\") " pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.186895 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.302152 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf"] Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.303001 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.314688 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.314977 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-w758h" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.315094 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.346782 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf"] Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.399485 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9sj2\" (UniqueName: \"kubernetes.io/projected/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-kube-api-access-k9sj2\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.399694 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-webhook-cert\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.399720 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-apiservice-cert\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.504938 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9sj2\" (UniqueName: \"kubernetes.io/projected/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-kube-api-access-k9sj2\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.506880 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-webhook-cert\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.506990 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-apiservice-cert\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 
13:01:50.522489 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-webhook-cert\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.522536 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-apiservice-cert\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.533835 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9sj2\" (UniqueName: \"kubernetes.io/projected/a8573d7d-c62b-45f5-9f5c-90a45126a2f4-kube-api-access-k9sj2\") pod \"metallb-operator-webhook-server-7d6997b498-j9mdf\" (UID: \"a8573d7d-c62b-45f5-9f5c-90a45126a2f4\") " pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.565942 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq"] Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.639320 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:01:50 crc kubenswrapper[4848]: I0128 13:01:50.966902 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf"] Jan 28 13:01:50 crc kubenswrapper[4848]: W0128 13:01:50.972783 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8573d7d_c62b_45f5_9f5c_90a45126a2f4.slice/crio-20d85d9f279840eb5b4b7063bc9e407153289075995f7bde7abfa428cb8418b7 WatchSource:0}: Error finding container 20d85d9f279840eb5b4b7063bc9e407153289075995f7bde7abfa428cb8418b7: Status 404 returned error can't find the container with id 20d85d9f279840eb5b4b7063bc9e407153289075995f7bde7abfa428cb8418b7 Jan 28 13:01:51 crc kubenswrapper[4848]: I0128 13:01:51.534578 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" event={"ID":"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b","Type":"ContainerStarted","Data":"1653c2c6e9bd4d5ef265ccd9f912c08cba35773facbf95d6f33a062373f41105"} Jan 28 13:01:51 crc kubenswrapper[4848]: I0128 13:01:51.545369 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" event={"ID":"a8573d7d-c62b-45f5-9f5c-90a45126a2f4","Type":"ContainerStarted","Data":"20d85d9f279840eb5b4b7063bc9e407153289075995f7bde7abfa428cb8418b7"} Jan 28 13:01:56 crc kubenswrapper[4848]: I0128 13:01:56.584391 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" event={"ID":"1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b","Type":"ContainerStarted","Data":"36359500169eac12f0ff53a33b139071870e9b0ddfaaa3d26990a42ebd65f6eb"} Jan 28 13:01:56 crc kubenswrapper[4848]: I0128 13:01:56.586523 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:01:56 crc kubenswrapper[4848]: I0128 13:01:56.629894 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" podStartSLOduration=4.077039539 podStartE2EDuration="7.629876995s" podCreationTimestamp="2026-01-28 13:01:49 +0000 UTC" firstStartedPulling="2026-01-28 13:01:50.580748626 +0000 UTC m=+937.492965674" lastFinishedPulling="2026-01-28 13:01:54.133586092 +0000 UTC m=+941.045803130" observedRunningTime="2026-01-28 13:01:56.629265409 +0000 UTC m=+943.541482457" watchObservedRunningTime="2026-01-28 13:01:56.629876995 +0000 UTC m=+943.542094033" Jan 28 13:01:57 crc kubenswrapper[4848]: I0128 13:01:57.593885 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" event={"ID":"a8573d7d-c62b-45f5-9f5c-90a45126a2f4","Type":"ContainerStarted","Data":"a5fe46b134a8168df29d04febbeb1c5f1105906381fc486a97cf10a363daa2ca"} Jan 28 13:01:58 crc kubenswrapper[4848]: I0128 13:01:58.601683 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.150679 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" podStartSLOduration=12.87811531 podStartE2EDuration="18.150654163s" podCreationTimestamp="2026-01-28 13:01:50 +0000 UTC" firstStartedPulling="2026-01-28 13:01:50.980385085 +0000 UTC m=+937.892602123" lastFinishedPulling="2026-01-28 13:01:56.252923928 +0000 UTC m=+943.165140976" observedRunningTime="2026-01-28 13:01:57.622151031 +0000 UTC m=+944.534368079" watchObservedRunningTime="2026-01-28 13:02:08.150654163 +0000 UTC m=+955.062871201" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.156192 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-46s59"] Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.157492 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.199191 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s59"] Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.280697 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-catalog-content\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.280829 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-utilities\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.280878 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ldpf\" (UniqueName: \"kubernetes.io/projected/43052041-f958-425e-9fbe-8534c81f0abb-kube-api-access-9ldpf\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.382394 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-catalog-content\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.382480 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-utilities\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.382515 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ldpf\" (UniqueName: \"kubernetes.io/projected/43052041-f958-425e-9fbe-8534c81f0abb-kube-api-access-9ldpf\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.383073 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-catalog-content\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.383096 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-utilities\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.412633 4848 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9ldpf\" (UniqueName: \"kubernetes.io/projected/43052041-f958-425e-9fbe-8534c81f0abb-kube-api-access-9ldpf\") pod \"redhat-marketplace-46s59\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.491466 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.955207 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-728g9"] Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.957638 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.977351 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-728g9"] Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.991220 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-catalog-content\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.991390 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6lnb\" (UniqueName: \"kubernetes.io/projected/d2b3786b-db5c-4de5-8e62-840330766f9c-kube-api-access-q6lnb\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:08 crc kubenswrapper[4848]: I0128 13:02:08.991418 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-utilities\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.051526 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s59"] Jan 28 13:02:09 crc kubenswrapper[4848]: W0128 13:02:09.064337 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43052041_f958_425e_9fbe_8534c81f0abb.slice/crio-fb6d518f3478a265207a0dca67f685f47c1473af8d96c10cf2b48d18905fd155 WatchSource:0}: Error finding container fb6d518f3478a265207a0dca67f685f47c1473af8d96c10cf2b48d18905fd155: Status 404 returned error can't find the container with id fb6d518f3478a265207a0dca67f685f47c1473af8d96c10cf2b48d18905fd155 Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.093628 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6lnb\" (UniqueName: \"kubernetes.io/projected/d2b3786b-db5c-4de5-8e62-840330766f9c-kube-api-access-q6lnb\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.093715 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-utilities\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.093811 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-catalog-content\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.094514 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-catalog-content\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.094574 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-utilities\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.118931 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6lnb\" (UniqueName: \"kubernetes.io/projected/d2b3786b-db5c-4de5-8e62-840330766f9c-kube-api-access-q6lnb\") pod \"community-operators-728g9\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.277693 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.562018 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-728g9"] Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.676875 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerStarted","Data":"142accbf2d15cf974bfc504368ecf14897c2a87cd5b709919ff6854e5436fdab"} Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.679362 4848 generic.go:334] "Generic (PLEG): container finished" podID="43052041-f958-425e-9fbe-8534c81f0abb" containerID="1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62" exitCode=0 Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.679422 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s59" event={"ID":"43052041-f958-425e-9fbe-8534c81f0abb","Type":"ContainerDied","Data":"1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62"} Jan 28 13:02:09 crc kubenswrapper[4848]: I0128 13:02:09.679462 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s59" event={"ID":"43052041-f958-425e-9fbe-8534c81f0abb","Type":"ContainerStarted","Data":"fb6d518f3478a265207a0dca67f685f47c1473af8d96c10cf2b48d18905fd155"} Jan 28 13:02:10 crc kubenswrapper[4848]: I0128 13:02:10.646219 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7d6997b498-j9mdf" Jan 28 13:02:10 crc kubenswrapper[4848]: I0128 13:02:10.687994 4848 generic.go:334] "Generic (PLEG): container finished" podID="43052041-f958-425e-9fbe-8534c81f0abb" containerID="bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc" exitCode=0 Jan 28 13:02:10 crc kubenswrapper[4848]: I0128 13:02:10.688402 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s59" event={"ID":"43052041-f958-425e-9fbe-8534c81f0abb","Type":"ContainerDied","Data":"bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc"} Jan 28 13:02:10 crc kubenswrapper[4848]: I0128 13:02:10.693509 4848 generic.go:334] "Generic (PLEG): container finished" podID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerID="f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6" exitCode=0 Jan 28 13:02:10 crc kubenswrapper[4848]: I0128 13:02:10.693598 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerDied","Data":"f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6"} Jan 28 13:02:11 crc kubenswrapper[4848]: I0128 13:02:11.705064 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerStarted","Data":"b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da"} Jan 28 13:02:11 crc kubenswrapper[4848]: I0128 13:02:11.711651 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s59" event={"ID":"43052041-f958-425e-9fbe-8534c81f0abb","Type":"ContainerStarted","Data":"6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75"} Jan 28 13:02:12 crc kubenswrapper[4848]: I0128 13:02:12.722674 4848 
generic.go:334] "Generic (PLEG): container finished" podID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerID="b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da" exitCode=0 Jan 28 13:02:12 crc kubenswrapper[4848]: I0128 13:02:12.722997 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerDied","Data":"b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da"} Jan 28 13:02:12 crc kubenswrapper[4848]: I0128 13:02:12.749344 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-46s59" podStartSLOduration=3.346804523 podStartE2EDuration="4.749313477s" podCreationTimestamp="2026-01-28 13:02:08 +0000 UTC" firstStartedPulling="2026-01-28 13:02:09.681137995 +0000 UTC m=+956.593355033" lastFinishedPulling="2026-01-28 13:02:11.083646949 +0000 UTC m=+957.995863987" observedRunningTime="2026-01-28 13:02:11.755513372 +0000 UTC m=+958.667730410" watchObservedRunningTime="2026-01-28 13:02:12.749313477 +0000 UTC m=+959.661530515" Jan 28 13:02:13 crc kubenswrapper[4848]: I0128 13:02:13.732093 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerStarted","Data":"bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470"} Jan 28 13:02:13 crc kubenswrapper[4848]: I0128 13:02:13.751185 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-728g9" podStartSLOduration=3.276375628 podStartE2EDuration="5.751157524s" podCreationTimestamp="2026-01-28 13:02:08 +0000 UTC" firstStartedPulling="2026-01-28 13:02:10.695496574 +0000 UTC m=+957.607713622" lastFinishedPulling="2026-01-28 13:02:13.17027848 +0000 UTC m=+960.082495518" observedRunningTime="2026-01-28 13:02:13.750091395 +0000 UTC m=+960.662308433" watchObservedRunningTime="2026-01-28 13:02:13.751157524 +0000 UTC m=+960.663374562" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.492334 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.493148 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.539810 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.551766 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zj4gk"] Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.553124 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.582058 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zj4gk"] Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.672742 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-catalog-content\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.672869 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76tk7\" (UniqueName: \"kubernetes.io/projected/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-kube-api-access-76tk7\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.672905 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-utilities\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.774615 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-catalog-content\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.774693 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76tk7\" (UniqueName: \"kubernetes.io/projected/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-kube-api-access-76tk7\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.774720 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-utilities\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.775363 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-utilities\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.775590 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-catalog-content\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.800149 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-76tk7\" (UniqueName: \"kubernetes.io/projected/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-kube-api-access-76tk7\") pod \"certified-operators-zj4gk\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.866565 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:18 crc kubenswrapper[4848]: I0128 13:02:18.877182 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.231606 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zj4gk"] Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.278514 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.278596 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.364449 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.821552 4848 generic.go:334] "Generic (PLEG): container finished" podID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerID="dd280effd835df8dcfefefef2d18b909cb2efb4dca3c0229e69a069c2e227dc9" exitCode=0 Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.821692 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj4gk" event={"ID":"ecdda85c-e4aa-4426-ac24-cea5ad5bd610","Type":"ContainerDied","Data":"dd280effd835df8dcfefefef2d18b909cb2efb4dca3c0229e69a069c2e227dc9"} Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.822058 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj4gk" event={"ID":"ecdda85c-e4aa-4426-ac24-cea5ad5bd610","Type":"ContainerStarted","Data":"d84642b9396e7ce4462a3d24af1cdec202ea58b3598287ab102de254fb896f7c"} Jan 28 13:02:19 crc kubenswrapper[4848]: I0128 13:02:19.864737 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:20 crc kubenswrapper[4848]: I0128 13:02:20.744516 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-728g9"] Jan 28 13:02:20 crc kubenswrapper[4848]: I0128 13:02:20.830675 4848 generic.go:334] "Generic (PLEG): container finished" podID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerID="4ca3ba96045169b423b6d606da036db86fc38fca4704abf6b2f859045c183acd" exitCode=0 Jan 28 13:02:20 crc kubenswrapper[4848]: I0128 13:02:20.830975 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj4gk" event={"ID":"ecdda85c-e4aa-4426-ac24-cea5ad5bd610","Type":"ContainerDied","Data":"4ca3ba96045169b423b6d606da036db86fc38fca4704abf6b2f859045c183acd"} Jan 28 13:02:21 crc kubenswrapper[4848]: I0128 13:02:21.840453 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj4gk" 
event={"ID":"ecdda85c-e4aa-4426-ac24-cea5ad5bd610","Type":"ContainerStarted","Data":"29811fa6c5c9c4ff0dd65269c60573f057a0dd859c4589aa4fb9d88b1bf81659"} Jan 28 13:02:21 crc kubenswrapper[4848]: I0128 13:02:21.840726 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-728g9" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="registry-server" containerID="cri-o://bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470" gracePeriod=2 Jan 28 13:02:21 crc kubenswrapper[4848]: I0128 13:02:21.862215 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zj4gk" podStartSLOduration=2.238432215 podStartE2EDuration="3.862188145s" podCreationTimestamp="2026-01-28 13:02:18 +0000 UTC" firstStartedPulling="2026-01-28 13:02:19.824016737 +0000 UTC m=+966.736233775" lastFinishedPulling="2026-01-28 13:02:21.447772667 +0000 UTC m=+968.359989705" observedRunningTime="2026-01-28 13:02:21.861150147 +0000 UTC m=+968.773367195" watchObservedRunningTime="2026-01-28 13:02:21.862188145 +0000 UTC m=+968.774405183" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.228111 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.329841 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6lnb\" (UniqueName: \"kubernetes.io/projected/d2b3786b-db5c-4de5-8e62-840330766f9c-kube-api-access-q6lnb\") pod \"d2b3786b-db5c-4de5-8e62-840330766f9c\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.329921 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-catalog-content\") pod \"d2b3786b-db5c-4de5-8e62-840330766f9c\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.329995 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-utilities\") pod \"d2b3786b-db5c-4de5-8e62-840330766f9c\" (UID: \"d2b3786b-db5c-4de5-8e62-840330766f9c\") " Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.331452 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-utilities" (OuterVolumeSpecName: "utilities") pod "d2b3786b-db5c-4de5-8e62-840330766f9c" (UID: "d2b3786b-db5c-4de5-8e62-840330766f9c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.345812 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2b3786b-db5c-4de5-8e62-840330766f9c-kube-api-access-q6lnb" (OuterVolumeSpecName: "kube-api-access-q6lnb") pod "d2b3786b-db5c-4de5-8e62-840330766f9c" (UID: "d2b3786b-db5c-4de5-8e62-840330766f9c"). InnerVolumeSpecName "kube-api-access-q6lnb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.432324 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.432375 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6lnb\" (UniqueName: \"kubernetes.io/projected/d2b3786b-db5c-4de5-8e62-840330766f9c-kube-api-access-q6lnb\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.614846 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2b3786b-db5c-4de5-8e62-840330766f9c" (UID: "d2b3786b-db5c-4de5-8e62-840330766f9c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.635624 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2b3786b-db5c-4de5-8e62-840330766f9c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.848402 4848 generic.go:334] "Generic (PLEG): container finished" podID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerID="bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470" exitCode=0 Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.848598 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerDied","Data":"bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470"} Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.848726 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-728g9" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.850656 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-728g9" event={"ID":"d2b3786b-db5c-4de5-8e62-840330766f9c","Type":"ContainerDied","Data":"142accbf2d15cf974bfc504368ecf14897c2a87cd5b709919ff6854e5436fdab"} Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.850778 4848 scope.go:117] "RemoveContainer" containerID="bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.870275 4848 scope.go:117] "RemoveContainer" containerID="b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.893495 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-728g9"] Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.899712 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-728g9"] Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.908452 4848 scope.go:117] "RemoveContainer" containerID="f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.928603 4848 scope.go:117] "RemoveContainer" containerID="bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470" Jan 28 13:02:22 crc kubenswrapper[4848]: E0128 13:02:22.929715 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470\": container with ID starting with bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470 not found: ID does not exist" containerID="bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.930008 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470"} err="failed to get container status \"bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470\": rpc error: code = NotFound desc = could not find container \"bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470\": container with ID starting with bb3db621c6e344e56e950411323349429614b21d04ed25cb2763838b3aac8470 not found: ID does not exist" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.930101 4848 scope.go:117] "RemoveContainer" containerID="b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da" Jan 28 13:02:22 crc kubenswrapper[4848]: E0128 13:02:22.930809 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da\": container with ID starting with b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da not found: ID does not exist" containerID="b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.930880 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da"} err="failed to get container status \"b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da\": rpc error: code = NotFound desc = could not find 
container \"b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da\": container with ID starting with b2c22c6cd7fb16249dfa7f1aed52b348f22334a60b14ca800a1447787e7047da not found: ID does not exist" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.930933 4848 scope.go:117] "RemoveContainer" containerID="f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6" Jan 28 13:02:22 crc kubenswrapper[4848]: E0128 13:02:22.931445 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6\": container with ID starting with f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6 not found: ID does not exist" containerID="f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6" Jan 28 13:02:22 crc kubenswrapper[4848]: I0128 13:02:22.931545 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6"} err="failed to get container status \"f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6\": rpc error: code = NotFound desc = could not find container \"f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6\": container with ID starting with f32e28717e714eeb14a0452039999ee7b847235e00631e3a6e76754d23a329c6 not found: ID does not exist" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.345063 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s59"] Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.345774 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-46s59" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="registry-server" containerID="cri-o://6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75" gracePeriod=2 Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.740653 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.776388 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ldpf\" (UniqueName: \"kubernetes.io/projected/43052041-f958-425e-9fbe-8534c81f0abb-kube-api-access-9ldpf\") pod \"43052041-f958-425e-9fbe-8534c81f0abb\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.776445 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-utilities\") pod \"43052041-f958-425e-9fbe-8534c81f0abb\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.776486 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-catalog-content\") pod \"43052041-f958-425e-9fbe-8534c81f0abb\" (UID: \"43052041-f958-425e-9fbe-8534c81f0abb\") " Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.778214 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-utilities" (OuterVolumeSpecName: "utilities") pod "43052041-f958-425e-9fbe-8534c81f0abb" (UID: "43052041-f958-425e-9fbe-8534c81f0abb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.781590 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43052041-f958-425e-9fbe-8534c81f0abb-kube-api-access-9ldpf" (OuterVolumeSpecName: "kube-api-access-9ldpf") pod "43052041-f958-425e-9fbe-8534c81f0abb" (UID: "43052041-f958-425e-9fbe-8534c81f0abb"). InnerVolumeSpecName "kube-api-access-9ldpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.799474 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43052041-f958-425e-9fbe-8534c81f0abb" (UID: "43052041-f958-425e-9fbe-8534c81f0abb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.860101 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" path="/var/lib/kubelet/pods/d2b3786b-db5c-4de5-8e62-840330766f9c/volumes" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.867822 4848 generic.go:334] "Generic (PLEG): container finished" podID="43052041-f958-425e-9fbe-8534c81f0abb" containerID="6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75" exitCode=0 Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.867913 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s59" event={"ID":"43052041-f958-425e-9fbe-8534c81f0abb","Type":"ContainerDied","Data":"6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75"} Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.867925 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s59" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.868131 4848 scope.go:117] "RemoveContainer" containerID="6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.868074 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s59" event={"ID":"43052041-f958-425e-9fbe-8534c81f0abb","Type":"ContainerDied","Data":"fb6d518f3478a265207a0dca67f685f47c1473af8d96c10cf2b48d18905fd155"} Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.877884 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ldpf\" (UniqueName: \"kubernetes.io/projected/43052041-f958-425e-9fbe-8534c81f0abb-kube-api-access-9ldpf\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.878222 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.878232 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43052041-f958-425e-9fbe-8534c81f0abb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.889721 4848 scope.go:117] "RemoveContainer" containerID="bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.908876 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s59"] Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.921168 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s59"] Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.928327 4848 scope.go:117] "RemoveContainer" containerID="1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.945486 4848 scope.go:117] "RemoveContainer" containerID="6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75" Jan 28 13:02:24 crc kubenswrapper[4848]: E0128 13:02:24.946096 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75\": container with ID starting with 6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75 not found: ID does not exist" containerID="6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.946138 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75"} err="failed to get container status \"6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75\": rpc error: code = NotFound desc = could not find container \"6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75\": container with ID starting with 6cda9ba53a0ec19972186bfe422836637a4d7e58d44dacf050bef1e28ca6dd75 not found: ID does not exist" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.946170 4848 scope.go:117] "RemoveContainer" containerID="bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc" Jan 28 13:02:24 crc kubenswrapper[4848]: E0128 
13:02:24.946511 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc\": container with ID starting with bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc not found: ID does not exist" containerID="bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.946545 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc"} err="failed to get container status \"bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc\": rpc error: code = NotFound desc = could not find container \"bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc\": container with ID starting with bdabf4638605f37d3d48de25eb4dd3df6e535b9b79ecf7c85fab13a1785ec0cc not found: ID does not exist" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.946564 4848 scope.go:117] "RemoveContainer" containerID="1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62" Jan 28 13:02:24 crc kubenswrapper[4848]: E0128 13:02:24.946841 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62\": container with ID starting with 1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62 not found: ID does not exist" containerID="1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62" Jan 28 13:02:24 crc kubenswrapper[4848]: I0128 13:02:24.946873 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62"} err="failed to get container status \"1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62\": rpc error: code = NotFound desc = could not find container \"1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62\": container with ID starting with 1a8360d74003caa4cf2f85e0ea6818d9f1115cf7af8fb8db60397a1a98b9fd62 not found: ID does not exist" Jan 28 13:02:26 crc kubenswrapper[4848]: I0128 13:02:26.862019 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43052041-f958-425e-9fbe-8534c81f0abb" path="/var/lib/kubelet/pods/43052041-f958-425e-9fbe-8534c81f0abb/volumes" Jan 28 13:02:28 crc kubenswrapper[4848]: I0128 13:02:28.877890 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:28 crc kubenswrapper[4848]: I0128 13:02:28.878335 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:28 crc kubenswrapper[4848]: I0128 13:02:28.979306 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:29 crc kubenswrapper[4848]: I0128 13:02:29.044059 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:29 crc kubenswrapper[4848]: I0128 13:02:29.950655 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zj4gk"] Jan 28 13:02:30 crc kubenswrapper[4848]: I0128 13:02:30.190891 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="metallb-system/metallb-operator-controller-manager-767fd6bd7f-8fzzq" Jan 28 13:02:30 crc kubenswrapper[4848]: I0128 13:02:30.916437 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zj4gk" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="registry-server" containerID="cri-o://29811fa6c5c9c4ff0dd65269c60573f057a0dd859c4589aa4fb9d88b1bf81659" gracePeriod=2 Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005443 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv"] Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.005833 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="extract-utilities" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005852 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="extract-utilities" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.005877 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="extract-content" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005884 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="extract-content" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.005897 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="registry-server" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005904 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="registry-server" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.005914 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="registry-server" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005919 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="registry-server" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.005929 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="extract-content" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005934 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="extract-content" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.005944 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="extract-utilities" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.005950 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="extract-utilities" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.006060 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2b3786b-db5c-4de5-8e62-840330766f9c" containerName="registry-server" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.006080 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="43052041-f958-425e-9fbe-8534c81f0abb" containerName="registry-server" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.006711 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.010433 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.010950 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-rrm9n"] Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.014215 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.023062 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-gn764" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.023402 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.023557 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.026874 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv"] Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088031 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics-certs\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088090 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-reloader\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088120 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twjhp\" (UniqueName: \"kubernetes.io/projected/9bb36bc6-537d-4853-9367-d38c728c6cc7-kube-api-access-twjhp\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088157 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088227 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-startup\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088269 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9744680c-1423-4e9a-a285-bca5722378d9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " 
pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088300 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sdtw\" (UniqueName: \"kubernetes.io/projected/9744680c-1423-4e9a-a285-bca5722378d9-kube-api-access-6sdtw\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088325 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-conf\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.088370 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-sockets\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.170406 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-sfdg2"] Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.171776 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.174068 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.176835 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.176835 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-f77jj" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.183412 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-tz8dm"] Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.184766 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.186986 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.187236 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.189973 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sdtw\" (UniqueName: \"kubernetes.io/projected/9744680c-1423-4e9a-a285-bca5722378d9-kube-api-access-6sdtw\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190013 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-conf\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190053 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-sockets\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190111 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics-certs\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190131 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-reloader\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190147 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twjhp\" (UniqueName: \"kubernetes.io/projected/9bb36bc6-537d-4853-9367-d38c728c6cc7-kube-api-access-twjhp\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190176 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190199 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-startup\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.190215 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/9744680c-1423-4e9a-a285-bca5722378d9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.190355 4848 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.190425 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9744680c-1423-4e9a-a285-bca5722378d9-cert podName:9744680c-1423-4e9a-a285-bca5722378d9 nodeName:}" failed. No retries permitted until 2026-01-28 13:02:31.690400772 +0000 UTC m=+978.602617820 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9744680c-1423-4e9a-a285-bca5722378d9-cert") pod "frr-k8s-webhook-server-7df86c4f6c-kdftv" (UID: "9744680c-1423-4e9a-a285-bca5722378d9") : secret "frr-k8s-webhook-server-cert" not found Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.191113 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-conf\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.191331 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-sockets\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.191390 4848 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.191419 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics-certs podName:9bb36bc6-537d-4853-9367-d38c728c6cc7 nodeName:}" failed. No retries permitted until 2026-01-28 13:02:31.69140948 +0000 UTC m=+978.603626518 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics-certs") pod "frr-k8s-rrm9n" (UID: "9bb36bc6-537d-4853-9367-d38c728c6cc7") : secret "frr-k8s-certs-secret" not found Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.191591 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-reloader\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.191947 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.192664 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9bb36bc6-537d-4853-9367-d38c728c6cc7-frr-startup\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.213622 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-tz8dm"] Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.247031 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sdtw\" (UniqueName: \"kubernetes.io/projected/9744680c-1423-4e9a-a285-bca5722378d9-kube-api-access-6sdtw\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.262991 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twjhp\" (UniqueName: \"kubernetes.io/projected/9bb36bc6-537d-4853-9367-d38c728c6cc7-kube-api-access-twjhp\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292065 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-metrics-certs\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292119 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292150 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-metallb-excludel2\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292171 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-cert\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292189 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-metrics-certs\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292219 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbfxw\" (UniqueName: \"kubernetes.io/projected/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-kube-api-access-nbfxw\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.292261 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qvwg\" (UniqueName: \"kubernetes.io/projected/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-kube-api-access-4qvwg\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.394169 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-metrics-certs\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.394702 4848 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.396424 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-metrics-certs podName:ce4dce22-bb0b-4fc3-b724-edbfe04cea8b nodeName:}" failed. No retries permitted until 2026-01-28 13:02:31.896386899 +0000 UTC m=+978.808603937 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-metrics-certs") pod "controller-6968d8fdc4-tz8dm" (UID: "ce4dce22-bb0b-4fc3-b724-edbfe04cea8b") : secret "controller-certs-secret" not found Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.396827 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.397215 4848 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.397278 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist podName:4645d31f-e3e8-4c7a-ace2-c82b88fd7488 nodeName:}" failed. 
No retries permitted until 2026-01-28 13:02:31.897262682 +0000 UTC m=+978.809479720 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist") pod "speaker-sfdg2" (UID: "4645d31f-e3e8-4c7a-ace2-c82b88fd7488") : secret "metallb-memberlist" not found Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.397352 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-metallb-excludel2\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.397401 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-cert\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.397481 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-metrics-certs\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.397923 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbfxw\" (UniqueName: \"kubernetes.io/projected/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-kube-api-access-nbfxw\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.398218 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-metallb-excludel2\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.398471 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qvwg\" (UniqueName: \"kubernetes.io/projected/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-kube-api-access-4qvwg\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.400017 4848 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.401343 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-metrics-certs\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.415438 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-cert\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.418945 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbfxw\" (UniqueName: \"kubernetes.io/projected/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-kube-api-access-nbfxw\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.419740 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qvwg\" (UniqueName: \"kubernetes.io/projected/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-kube-api-access-4qvwg\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.703350 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics-certs\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.703436 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9744680c-1423-4e9a-a285-bca5722378d9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.711164 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9744680c-1423-4e9a-a285-bca5722378d9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-kdftv\" (UID: \"9744680c-1423-4e9a-a285-bca5722378d9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.716370 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9bb36bc6-537d-4853-9367-d38c728c6cc7-metrics-certs\") pod \"frr-k8s-rrm9n\" (UID: \"9bb36bc6-537d-4853-9367-d38c728c6cc7\") " pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.907272 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-metrics-certs\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.907350 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.907531 4848 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 13:02:31 crc kubenswrapper[4848]: E0128 13:02:31.907634 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist podName:4645d31f-e3e8-4c7a-ace2-c82b88fd7488 nodeName:}" failed. No retries permitted until 2026-01-28 13:02:32.907605921 +0000 UTC m=+979.819822959 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist") pod "speaker-sfdg2" (UID: "4645d31f-e3e8-4c7a-ace2-c82b88fd7488") : secret "metallb-memberlist" not found Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.912039 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce4dce22-bb0b-4fc3-b724-edbfe04cea8b-metrics-certs\") pod \"controller-6968d8fdc4-tz8dm\" (UID: \"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b\") " pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.929414 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.929620 4848 generic.go:334] "Generic (PLEG): container finished" podID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerID="29811fa6c5c9c4ff0dd65269c60573f057a0dd859c4589aa4fb9d88b1bf81659" exitCode=0 Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.929675 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj4gk" event={"ID":"ecdda85c-e4aa-4426-ac24-cea5ad5bd610","Type":"ContainerDied","Data":"29811fa6c5c9c4ff0dd65269c60573f057a0dd859c4589aa4fb9d88b1bf81659"} Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.964749 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:31 crc kubenswrapper[4848]: I0128 13:02:31.968106 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.009571 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76tk7\" (UniqueName: \"kubernetes.io/projected/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-kube-api-access-76tk7\") pod \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.009637 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-utilities\") pod \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.009713 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-catalog-content\") pod \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\" (UID: \"ecdda85c-e4aa-4426-ac24-cea5ad5bd610\") " Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.011330 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-utilities" (OuterVolumeSpecName: "utilities") pod "ecdda85c-e4aa-4426-ac24-cea5ad5bd610" (UID: "ecdda85c-e4aa-4426-ac24-cea5ad5bd610"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.014969 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-kube-api-access-76tk7" (OuterVolumeSpecName: "kube-api-access-76tk7") pod "ecdda85c-e4aa-4426-ac24-cea5ad5bd610" (UID: "ecdda85c-e4aa-4426-ac24-cea5ad5bd610"). InnerVolumeSpecName "kube-api-access-76tk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.078830 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ecdda85c-e4aa-4426-ac24-cea5ad5bd610" (UID: "ecdda85c-e4aa-4426-ac24-cea5ad5bd610"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.099377 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.112140 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76tk7\" (UniqueName: \"kubernetes.io/projected/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-kube-api-access-76tk7\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.112167 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.112179 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecdda85c-e4aa-4426-ac24-cea5ad5bd610-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.186528 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv"] Jan 28 13:02:32 crc kubenswrapper[4848]: W0128 13:02:32.195312 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9744680c_1423_4e9a_a285_bca5722378d9.slice/crio-d72261e60b7be592fbe83c029cae0d876ca3b95c806681fae52c7329aba51aef WatchSource:0}: Error finding container d72261e60b7be592fbe83c029cae0d876ca3b95c806681fae52c7329aba51aef: Status 404 returned error can't find the container with id d72261e60b7be592fbe83c029cae0d876ca3b95c806681fae52c7329aba51aef Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.357535 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-tz8dm"] Jan 28 13:02:32 crc kubenswrapper[4848]: W0128 13:02:32.364335 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce4dce22_bb0b_4fc3_b724_edbfe04cea8b.slice/crio-bed2a6158ff243bfaba04d0bd545839ea05bdbaa9773570ca258844cfc0030a5 WatchSource:0}: Error finding container bed2a6158ff243bfaba04d0bd545839ea05bdbaa9773570ca258844cfc0030a5: Status 404 returned error can't find the container with id bed2a6158ff243bfaba04d0bd545839ea05bdbaa9773570ca258844cfc0030a5 Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.924449 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" 
(UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.932303 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/4645d31f-e3e8-4c7a-ace2-c82b88fd7488-memberlist\") pod \"speaker-sfdg2\" (UID: \"4645d31f-e3e8-4c7a-ace2-c82b88fd7488\") " pod="metallb-system/speaker-sfdg2" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.938568 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" event={"ID":"9744680c-1423-4e9a-a285-bca5722378d9","Type":"ContainerStarted","Data":"d72261e60b7be592fbe83c029cae0d876ca3b95c806681fae52c7329aba51aef"} Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.940949 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-tz8dm" event={"ID":"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b","Type":"ContainerStarted","Data":"c42d12b2c91586764327816f7f41df0f00ca7125e07a94841df51ce6a27f7024"} Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.941098 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.941172 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-tz8dm" event={"ID":"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b","Type":"ContainerStarted","Data":"f845287498ebc75ea6536e424e7b15929a71813ed2d0e36163bb65acc6df7191"} Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.941268 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-tz8dm" event={"ID":"ce4dce22-bb0b-4fc3-b724-edbfe04cea8b","Type":"ContainerStarted","Data":"bed2a6158ff243bfaba04d0bd545839ea05bdbaa9773570ca258844cfc0030a5"} Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.943503 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zj4gk" event={"ID":"ecdda85c-e4aa-4426-ac24-cea5ad5bd610","Type":"ContainerDied","Data":"d84642b9396e7ce4462a3d24af1cdec202ea58b3598287ab102de254fb896f7c"} Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.943533 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zj4gk" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.943555 4848 scope.go:117] "RemoveContainer" containerID="29811fa6c5c9c4ff0dd65269c60573f057a0dd859c4589aa4fb9d88b1bf81659" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.946405 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"dc5efaecce02cc70db92cdfb12045fc28d61e7d684653558d9bf4bb3e901cffe"} Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.960594 4848 scope.go:117] "RemoveContainer" containerID="4ca3ba96045169b423b6d606da036db86fc38fca4704abf6b2f859045c183acd" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.971474 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-tz8dm" podStartSLOduration=1.9714487269999998 podStartE2EDuration="1.971448727s" podCreationTimestamp="2026-01-28 13:02:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:02:32.96163742 +0000 UTC m=+979.873854468" watchObservedRunningTime="2026-01-28 13:02:32.971448727 +0000 UTC m=+979.883665765" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.979594 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zj4gk"] Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.983199 4848 scope.go:117] "RemoveContainer" containerID="dd280effd835df8dcfefefef2d18b909cb2efb4dca3c0229e69a069c2e227dc9" Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.987280 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zj4gk"] Jan 28 13:02:32 crc kubenswrapper[4848]: I0128 13:02:32.987803 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-sfdg2" Jan 28 13:02:33 crc kubenswrapper[4848]: I0128 13:02:33.965280 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sfdg2" event={"ID":"4645d31f-e3e8-4c7a-ace2-c82b88fd7488","Type":"ContainerStarted","Data":"e527d34effe099d6825bb1c40797eb8ce1395dddfc647a57da45a197e508f6ba"} Jan 28 13:02:33 crc kubenswrapper[4848]: I0128 13:02:33.965772 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sfdg2" event={"ID":"4645d31f-e3e8-4c7a-ace2-c82b88fd7488","Type":"ContainerStarted","Data":"a8abae6ffacb991d275f525b80da3e056f36b6ccff643612726952871ced06e7"} Jan 28 13:02:33 crc kubenswrapper[4848]: I0128 13:02:33.965820 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sfdg2" event={"ID":"4645d31f-e3e8-4c7a-ace2-c82b88fd7488","Type":"ContainerStarted","Data":"fb304e2d01c7a34fccb11fb0540a511cf44299c6eb46d97fd79bda26299c357f"} Jan 28 13:02:33 crc kubenswrapper[4848]: I0128 13:02:33.966284 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-sfdg2" Jan 28 13:02:34 crc kubenswrapper[4848]: I0128 13:02:34.002135 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-sfdg2" podStartSLOduration=3.002106387 podStartE2EDuration="3.002106387s" podCreationTimestamp="2026-01-28 13:02:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:02:33.996679389 +0000 UTC m=+980.908896427" watchObservedRunningTime="2026-01-28 13:02:34.002106387 +0000 UTC m=+980.914323425" Jan 28 13:02:34 crc kubenswrapper[4848]: I0128 13:02:34.860703 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" path="/var/lib/kubelet/pods/ecdda85c-e4aa-4426-ac24-cea5ad5bd610/volumes" Jan 28 13:02:41 crc kubenswrapper[4848]: I0128 13:02:41.039587 4848 generic.go:334] "Generic (PLEG): container finished" podID="9bb36bc6-537d-4853-9367-d38c728c6cc7" containerID="3fafb54080c82aa84d1115c5965ff49272cc3adbac069e53209fec467f254051" exitCode=0 Jan 28 13:02:41 crc kubenswrapper[4848]: I0128 13:02:41.039694 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerDied","Data":"3fafb54080c82aa84d1115c5965ff49272cc3adbac069e53209fec467f254051"} Jan 28 13:02:41 crc kubenswrapper[4848]: I0128 13:02:41.043641 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" event={"ID":"9744680c-1423-4e9a-a285-bca5722378d9","Type":"ContainerStarted","Data":"68bf8b72ce3d910b1fd95997e4cfba2192d309d54001967a4719aaf81cabcb74"} Jan 28 13:02:41 crc kubenswrapper[4848]: I0128 13:02:41.043826 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" Jan 28 13:02:42 crc kubenswrapper[4848]: I0128 13:02:42.054738 4848 generic.go:334] "Generic (PLEG): container finished" podID="9bb36bc6-537d-4853-9367-d38c728c6cc7" containerID="d5c20b500e138bece6eac6cadac8dd9e24e6973bc684601caa28ce192240f1ce" exitCode=0 Jan 28 13:02:42 crc kubenswrapper[4848]: I0128 13:02:42.054841 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" 
event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerDied","Data":"d5c20b500e138bece6eac6cadac8dd9e24e6973bc684601caa28ce192240f1ce"} Jan 28 13:02:42 crc kubenswrapper[4848]: I0128 13:02:42.084519 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv" podStartSLOduration=3.976887398 podStartE2EDuration="12.084495367s" podCreationTimestamp="2026-01-28 13:02:30 +0000 UTC" firstStartedPulling="2026-01-28 13:02:32.198958168 +0000 UTC m=+979.111175206" lastFinishedPulling="2026-01-28 13:02:40.306566137 +0000 UTC m=+987.218783175" observedRunningTime="2026-01-28 13:02:41.11584149 +0000 UTC m=+988.028058528" watchObservedRunningTime="2026-01-28 13:02:42.084495367 +0000 UTC m=+988.996712405" Jan 28 13:02:42 crc kubenswrapper[4848]: I0128 13:02:42.109041 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-tz8dm" Jan 28 13:02:43 crc kubenswrapper[4848]: I0128 13:02:43.065552 4848 generic.go:334] "Generic (PLEG): container finished" podID="9bb36bc6-537d-4853-9367-d38c728c6cc7" containerID="56effe7925694f668bd4c38b5cef5f5fd35e3ee121a6cbe70cc0a78f4c73993a" exitCode=0 Jan 28 13:02:43 crc kubenswrapper[4848]: I0128 13:02:43.065608 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerDied","Data":"56effe7925694f668bd4c38b5cef5f5fd35e3ee121a6cbe70cc0a78f4c73993a"} Jan 28 13:02:44 crc kubenswrapper[4848]: I0128 13:02:44.083571 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"f6f6fab9e3f6a8e8fe395a1e89e88c0b5d5f90b5e479822869d42ba0c8d6efd8"} Jan 28 13:02:44 crc kubenswrapper[4848]: I0128 13:02:44.083638 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"6f79c06d967be719b36bbb81c09458c2c0d39fbcc057b6a0b43cf886aff7fb16"} Jan 28 13:02:44 crc kubenswrapper[4848]: I0128 13:02:44.083653 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"e360d4e7893e172bdb23e5c9cd65855650b0d915cbda65b424ea9ac8365ba333"} Jan 28 13:02:44 crc kubenswrapper[4848]: I0128 13:02:44.083668 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"e2eaf78f93698b119439a27d843e04507c372cc3bcf9800faf16013aac50d2e3"} Jan 28 13:02:45 crc kubenswrapper[4848]: I0128 13:02:45.112867 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"b47351cc7f02ab5fcb341f4d00759ab8f32d7f78bcb59878128be1e0cf5ab409"} Jan 28 13:02:45 crc kubenswrapper[4848]: I0128 13:02:45.113328 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rrm9n" event={"ID":"9bb36bc6-537d-4853-9367-d38c728c6cc7","Type":"ContainerStarted","Data":"f7e20e6ff5b209b4398db2df75396a54df9e66afd570b89c47d770f90ef46340"} Jan 28 13:02:45 crc kubenswrapper[4848]: I0128 13:02:45.113360 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-rrm9n" Jan 28 13:02:45 crc 
Jan 28 13:02:46 crc kubenswrapper[4848]: I0128 13:02:46.968794 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-rrm9n"
Jan 28 13:02:47 crc kubenswrapper[4848]: I0128 13:02:47.017199 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-rrm9n"
Jan 28 13:02:51 crc kubenswrapper[4848]: I0128 13:02:51.934741 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-kdftv"
Jan 28 13:02:52 crc kubenswrapper[4848]: I0128 13:02:52.992872 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-sfdg2"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.025612 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-r4kd4"]
Jan 28 13:02:56 crc kubenswrapper[4848]: E0128 13:02:56.026863 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="extract-utilities"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.026887 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="extract-utilities"
Jan 28 13:02:56 crc kubenswrapper[4848]: E0128 13:02:56.026912 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="extract-content"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.026919 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="extract-content"
Jan 28 13:02:56 crc kubenswrapper[4848]: E0128 13:02:56.026939 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="registry-server"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.026946 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="registry-server"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.027112 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecdda85c-e4aa-4426-ac24-cea5ad5bd610" containerName="registry-server"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.027801 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.029738 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.030181 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-8wl69"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.030359 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.045917 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r4kd4"]
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.119383 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgkq5\" (UniqueName: \"kubernetes.io/projected/cf309d7d-db34-471f-939d-8b0367857db0-kube-api-access-jgkq5\") pod \"openstack-operator-index-r4kd4\" (UID: \"cf309d7d-db34-471f-939d-8b0367857db0\") " pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.222858 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgkq5\" (UniqueName: \"kubernetes.io/projected/cf309d7d-db34-471f-939d-8b0367857db0-kube-api-access-jgkq5\") pod \"openstack-operator-index-r4kd4\" (UID: \"cf309d7d-db34-471f-939d-8b0367857db0\") " pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.250610 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgkq5\" (UniqueName: \"kubernetes.io/projected/cf309d7d-db34-471f-939d-8b0367857db0-kube-api-access-jgkq5\") pod \"openstack-operator-index-r4kd4\" (UID: \"cf309d7d-db34-471f-939d-8b0367857db0\") " pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.347929 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:02:56 crc kubenswrapper[4848]: I0128 13:02:56.787293 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r4kd4"]
Jan 28 13:02:57 crc kubenswrapper[4848]: I0128 13:02:57.207036 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r4kd4" event={"ID":"cf309d7d-db34-471f-939d-8b0367857db0","Type":"ContainerStarted","Data":"26f1802f189444654d44498aaabdea0728c972493b640ec8f6a35799b12f2c0a"}
Jan 28 13:02:59 crc kubenswrapper[4848]: I0128 13:02:59.225718 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r4kd4" event={"ID":"cf309d7d-db34-471f-939d-8b0367857db0","Type":"ContainerStarted","Data":"69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1"}
Jan 28 13:02:59 crc kubenswrapper[4848]: I0128 13:02:59.248236 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-r4kd4" podStartSLOduration=1.006212076 podStartE2EDuration="3.248202361s" podCreationTimestamp="2026-01-28 13:02:56 +0000 UTC" firstStartedPulling="2026-01-28 13:02:56.794059672 +0000 UTC m=+1003.706276740" lastFinishedPulling="2026-01-28 13:02:59.036049987 +0000 UTC m=+1005.948267025" observedRunningTime="2026-01-28 13:02:59.243308307 +0000 UTC m=+1006.155525345" watchObservedRunningTime="2026-01-28 13:02:59.248202361 +0000 UTC m=+1006.160419399"
Jan 28 13:02:59 crc kubenswrapper[4848]: I0128 13:02:59.415290 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-r4kd4"]
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.017614 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6jnp2"]
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.018792 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.036787 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6jnp2"]
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.086448 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njjlm\" (UniqueName: \"kubernetes.io/projected/730f88b0-924e-4c06-868f-4baf83bc17a9-kube-api-access-njjlm\") pod \"openstack-operator-index-6jnp2\" (UID: \"730f88b0-924e-4c06-868f-4baf83bc17a9\") " pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.187834 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njjlm\" (UniqueName: \"kubernetes.io/projected/730f88b0-924e-4c06-868f-4baf83bc17a9-kube-api-access-njjlm\") pod \"openstack-operator-index-6jnp2\" (UID: \"730f88b0-924e-4c06-868f-4baf83bc17a9\") " pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.213788 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njjlm\" (UniqueName: \"kubernetes.io/projected/730f88b0-924e-4c06-868f-4baf83bc17a9-kube-api-access-njjlm\") pod \"openstack-operator-index-6jnp2\" (UID: \"730f88b0-924e-4c06-868f-4baf83bc17a9\") " pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.340386 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:00 crc kubenswrapper[4848]: I0128 13:03:00.768195 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6jnp2"]
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.246568 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6jnp2" event={"ID":"730f88b0-924e-4c06-868f-4baf83bc17a9","Type":"ContainerStarted","Data":"10fc3140e6d8936fa91c8dd55902994d05dd635a71ad5b594b5b31c2aa750efa"}
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.247079 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6jnp2" event={"ID":"730f88b0-924e-4c06-868f-4baf83bc17a9","Type":"ContainerStarted","Data":"5bd5ea5e58a8ed3fad2415579f0bbd2ab2ea9c5a87f7228836f25b0fd54aa1ed"}
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.246744 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-r4kd4" podUID="cf309d7d-db34-471f-939d-8b0367857db0" containerName="registry-server" containerID="cri-o://69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1" gracePeriod=2
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.268617 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6jnp2" podStartSLOduration=1.221457156 podStartE2EDuration="1.268583303s" podCreationTimestamp="2026-01-28 13:03:00 +0000 UTC" firstStartedPulling="2026-01-28 13:03:00.786383223 +0000 UTC m=+1007.698600271" lastFinishedPulling="2026-01-28 13:03:00.83350938 +0000 UTC m=+1007.745726418" observedRunningTime="2026-01-28 13:03:01.264354398 +0000 UTC m=+1008.176571436" watchObservedRunningTime="2026-01-28 13:03:01.268583303 +0000 UTC m=+1008.180800341"
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.637201 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.711527 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgkq5\" (UniqueName: \"kubernetes.io/projected/cf309d7d-db34-471f-939d-8b0367857db0-kube-api-access-jgkq5\") pod \"cf309d7d-db34-471f-939d-8b0367857db0\" (UID: \"cf309d7d-db34-471f-939d-8b0367857db0\") "
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.718806 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf309d7d-db34-471f-939d-8b0367857db0-kube-api-access-jgkq5" (OuterVolumeSpecName: "kube-api-access-jgkq5") pod "cf309d7d-db34-471f-939d-8b0367857db0" (UID: "cf309d7d-db34-471f-939d-8b0367857db0"). InnerVolumeSpecName "kube-api-access-jgkq5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.816868 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgkq5\" (UniqueName: \"kubernetes.io/projected/cf309d7d-db34-471f-939d-8b0367857db0-kube-api-access-jgkq5\") on node \"crc\" DevicePath \"\""
Jan 28 13:03:01 crc kubenswrapper[4848]: I0128 13:03:01.972808 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-rrm9n"
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.257372 4848 generic.go:334] "Generic (PLEG): container finished" podID="cf309d7d-db34-471f-939d-8b0367857db0" containerID="69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1" exitCode=0
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.257454 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r4kd4" event={"ID":"cf309d7d-db34-471f-939d-8b0367857db0","Type":"ContainerDied","Data":"69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1"}
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.257499 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r4kd4" event={"ID":"cf309d7d-db34-471f-939d-8b0367857db0","Type":"ContainerDied","Data":"26f1802f189444654d44498aaabdea0728c972493b640ec8f6a35799b12f2c0a"}
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.257522 4848 scope.go:117] "RemoveContainer" containerID="69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1"
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.258013 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r4kd4"
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.281376 4848 scope.go:117] "RemoveContainer" containerID="69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1"
Jan 28 13:03:02 crc kubenswrapper[4848]: E0128 13:03:02.282612 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1\": container with ID starting with 69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1 not found: ID does not exist" containerID="69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1"
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.282670 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1"} err="failed to get container status \"69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1\": rpc error: code = NotFound desc = could not find container \"69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1\": container with ID starting with 69af305eb8358fc32044d02bd3d05f58197f4f922508fd85fa0d28e224f7c3e1 not found: ID does not exist"
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.296042 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-r4kd4"]
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.299709 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-r4kd4"]
Jan 28 13:03:02 crc kubenswrapper[4848]: I0128 13:03:02.866624 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf309d7d-db34-471f-939d-8b0367857db0" path="/var/lib/kubelet/pods/cf309d7d-db34-471f-939d-8b0367857db0/volumes"
Jan 28 13:03:07 crc kubenswrapper[4848]: I0128 13:03:07.924959 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:03:07 crc kubenswrapper[4848]: I0128 13:03:07.925506 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:03:10 crc kubenswrapper[4848]: I0128 13:03:10.341982 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:10 crc kubenswrapper[4848]: I0128 13:03:10.342426 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:10 crc kubenswrapper[4848]: I0128 13:03:10.368841 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:11 crc kubenswrapper[4848]: I0128 13:03:11.354465 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6jnp2"
Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.571405 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq"]
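The "RemoveContainer" sequence above shows a benign race on pod deletion: the kubelet asks the runtime (CRI-O) for the status of a container it has just removed, and the runtime answers with a gRPC NotFound status, which the kubelet logs as "DeleteContainer returned error" but does not treat as fatal. A sketch of how such an error is usually distinguished on the caller side, assuming the error chain preserves the gRPC status; the helper name is illustrative:

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isBenignNotFound reports whether err carries a gRPC NotFound status,
// e.g. "rpc error: code = NotFound desc = could not find container ...".
// A NotFound on deletion means the container is already gone, so the
// removal can be treated as having succeeded.
func isBenignNotFound(err error) bool {
	return status.Code(err) == codes.NotFound
}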
pods=["openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq"] Jan 28 13:03:16 crc kubenswrapper[4848]: E0128 13:03:16.572772 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf309d7d-db34-471f-939d-8b0367857db0" containerName="registry-server" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.572792 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf309d7d-db34-471f-939d-8b0367857db0" containerName="registry-server" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.572963 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf309d7d-db34-471f-939d-8b0367857db0" containerName="registry-server" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.574307 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.577367 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-ncxdf" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.586975 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq"] Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.748063 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-bundle\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.748232 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk2tc\" (UniqueName: \"kubernetes.io/projected/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-kube-api-access-zk2tc\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.748306 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-util\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.849903 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-bundle\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.850071 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk2tc\" (UniqueName: \"kubernetes.io/projected/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-kube-api-access-zk2tc\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: 
\"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.850135 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-util\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.851131 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-util\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.851129 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-bundle\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.874737 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk2tc\" (UniqueName: \"kubernetes.io/projected/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-kube-api-access-zk2tc\") pod \"06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:16 crc kubenswrapper[4848]: I0128 13:03:16.902694 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:17 crc kubenswrapper[4848]: I0128 13:03:17.336561 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq"] Jan 28 13:03:17 crc kubenswrapper[4848]: I0128 13:03:17.377402 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" event={"ID":"eca5d9bf-13bb-40da-b40a-d9d656a0fcff","Type":"ContainerStarted","Data":"65ad45a39e6d2099a2140d801d87ece12e0c666deeb1a785111d3ac74ed6a7d7"} Jan 28 13:03:18 crc kubenswrapper[4848]: I0128 13:03:18.387723 4848 generic.go:334] "Generic (PLEG): container finished" podID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerID="83c6cb3031b75375068972ee5001367e015d64d59ebeb089d8cb83aabc85d8b5" exitCode=0 Jan 28 13:03:18 crc kubenswrapper[4848]: I0128 13:03:18.387997 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" event={"ID":"eca5d9bf-13bb-40da-b40a-d9d656a0fcff","Type":"ContainerDied","Data":"83c6cb3031b75375068972ee5001367e015d64d59ebeb089d8cb83aabc85d8b5"} Jan 28 13:03:20 crc kubenswrapper[4848]: I0128 13:03:20.408575 4848 generic.go:334] "Generic (PLEG): container finished" podID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerID="f2f6a02e6342f5cd3d417439f8eaa536aa5476397c845858ff13a940f43ccda1" exitCode=0 Jan 28 13:03:20 crc kubenswrapper[4848]: I0128 13:03:20.408655 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" event={"ID":"eca5d9bf-13bb-40da-b40a-d9d656a0fcff","Type":"ContainerDied","Data":"f2f6a02e6342f5cd3d417439f8eaa536aa5476397c845858ff13a940f43ccda1"} Jan 28 13:03:21 crc kubenswrapper[4848]: I0128 13:03:21.421512 4848 generic.go:334] "Generic (PLEG): container finished" podID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerID="5d0f3fd927a003d28af1af72a7c45937090875e799c83e8ba372b46d66489c42" exitCode=0 Jan 28 13:03:21 crc kubenswrapper[4848]: I0128 13:03:21.421575 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" event={"ID":"eca5d9bf-13bb-40da-b40a-d9d656a0fcff","Type":"ContainerDied","Data":"5d0f3fd927a003d28af1af72a7c45937090875e799c83e8ba372b46d66489c42"} Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.682793 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.849529 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-bundle\") pod \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.849656 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-util\") pod \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.850449 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk2tc\" (UniqueName: \"kubernetes.io/projected/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-kube-api-access-zk2tc\") pod \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\" (UID: \"eca5d9bf-13bb-40da-b40a-d9d656a0fcff\") " Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.851186 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-bundle" (OuterVolumeSpecName: "bundle") pod "eca5d9bf-13bb-40da-b40a-d9d656a0fcff" (UID: "eca5d9bf-13bb-40da-b40a-d9d656a0fcff"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.858513 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-kube-api-access-zk2tc" (OuterVolumeSpecName: "kube-api-access-zk2tc") pod "eca5d9bf-13bb-40da-b40a-d9d656a0fcff" (UID: "eca5d9bf-13bb-40da-b40a-d9d656a0fcff"). InnerVolumeSpecName "kube-api-access-zk2tc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.864566 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-util" (OuterVolumeSpecName: "util") pod "eca5d9bf-13bb-40da-b40a-d9d656a0fcff" (UID: "eca5d9bf-13bb-40da-b40a-d9d656a0fcff"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.952396 4848 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.952458 4848 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-util\") on node \"crc\" DevicePath \"\"" Jan 28 13:03:22 crc kubenswrapper[4848]: I0128 13:03:22.952475 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk2tc\" (UniqueName: \"kubernetes.io/projected/eca5d9bf-13bb-40da-b40a-d9d656a0fcff-kube-api-access-zk2tc\") on node \"crc\" DevicePath \"\"" Jan 28 13:03:23 crc kubenswrapper[4848]: I0128 13:03:23.440151 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" event={"ID":"eca5d9bf-13bb-40da-b40a-d9d656a0fcff","Type":"ContainerDied","Data":"65ad45a39e6d2099a2140d801d87ece12e0c666deeb1a785111d3ac74ed6a7d7"} Jan 28 13:03:23 crc kubenswrapper[4848]: I0128 13:03:23.440201 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65ad45a39e6d2099a2140d801d87ece12e0c666deeb1a785111d3ac74ed6a7d7" Jan 28 13:03:23 crc kubenswrapper[4848]: I0128 13:03:23.440233 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.615642 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq"] Jan 28 13:03:28 crc kubenswrapper[4848]: E0128 13:03:28.616757 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="util" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.616775 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="util" Jan 28 13:03:28 crc kubenswrapper[4848]: E0128 13:03:28.616802 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="extract" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.616808 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="extract" Jan 28 13:03:28 crc kubenswrapper[4848]: E0128 13:03:28.616824 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="pull" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.616831 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="pull" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.616968 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="eca5d9bf-13bb-40da-b40a-d9d656a0fcff" containerName="extract" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.617604 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.622511 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-h5jkl" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.637512 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4gvm\" (UniqueName: \"kubernetes.io/projected/221cef79-cbf0-4a42-baca-872879406257-kube-api-access-q4gvm\") pod \"openstack-operator-controller-init-7db44d5f8c-t26mq\" (UID: \"221cef79-cbf0-4a42-baca-872879406257\") " pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.645731 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq"] Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.739831 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4gvm\" (UniqueName: \"kubernetes.io/projected/221cef79-cbf0-4a42-baca-872879406257-kube-api-access-q4gvm\") pod \"openstack-operator-controller-init-7db44d5f8c-t26mq\" (UID: \"221cef79-cbf0-4a42-baca-872879406257\") " pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.767649 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4gvm\" (UniqueName: \"kubernetes.io/projected/221cef79-cbf0-4a42-baca-872879406257-kube-api-access-q4gvm\") pod \"openstack-operator-controller-init-7db44d5f8c-t26mq\" (UID: \"221cef79-cbf0-4a42-baca-872879406257\") " pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:03:28 crc kubenswrapper[4848]: I0128 13:03:28.937908 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:03:29 crc kubenswrapper[4848]: I0128 13:03:29.453393 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq"] Jan 28 13:03:29 crc kubenswrapper[4848]: I0128 13:03:29.496486 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" event={"ID":"221cef79-cbf0-4a42-baca-872879406257","Type":"ContainerStarted","Data":"7b0a93d9059e513c438d68b23b8e2f414f81f20ff6e64d440f104a483528ce02"} Jan 28 13:03:36 crc kubenswrapper[4848]: I0128 13:03:36.555802 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" event={"ID":"221cef79-cbf0-4a42-baca-872879406257","Type":"ContainerStarted","Data":"b8d1c23210dba0cb7b1562f288911f8fa5eccfa795c3489805c251b22d52ded3"} Jan 28 13:03:36 crc kubenswrapper[4848]: I0128 13:03:36.556801 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:03:36 crc kubenswrapper[4848]: I0128 13:03:36.588663 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" podStartSLOduration=2.791301374 podStartE2EDuration="8.588633423s" podCreationTimestamp="2026-01-28 13:03:28 +0000 UTC" firstStartedPulling="2026-01-28 13:03:29.469311507 +0000 UTC m=+1036.381528545" lastFinishedPulling="2026-01-28 13:03:35.266643556 +0000 UTC m=+1042.178860594" observedRunningTime="2026-01-28 13:03:36.587112392 +0000 UTC m=+1043.499329430" watchObservedRunningTime="2026-01-28 13:03:36.588633423 +0000 UTC m=+1043.500850461" Jan 28 13:03:37 crc kubenswrapper[4848]: I0128 13:03:37.924945 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:03:37 crc kubenswrapper[4848]: I0128 13:03:37.925052 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:03:48 crc kubenswrapper[4848]: I0128 13:03:48.941744 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-7db44d5f8c-t26mq" Jan 28 13:04:07 crc kubenswrapper[4848]: I0128 13:04:07.924222 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:04:07 crc kubenswrapper[4848]: I0128 13:04:07.925012 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Jan 28 13:04:07 crc kubenswrapper[4848]: I0128 13:04:07.925109 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:04:08 crc kubenswrapper[4848]: I0128 13:04:08.791133 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"875a982e7db5cc44931d699a4c51480a5860a252ccb155a317028cb1da4c99e1"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:04:08 crc kubenswrapper[4848]: I0128 13:04:08.791638 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://875a982e7db5cc44931d699a4c51480a5860a252ccb155a317028cb1da4c99e1" gracePeriod=600 Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.576311 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.578235 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.582577 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.583833 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.582616 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-7vwhw" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.586588 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-qmmc2" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.607641 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.618672 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.620076 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.622694 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-hnwfr" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.624064 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.643854 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v77r6\" (UniqueName: \"kubernetes.io/projected/f41ee80c-1ab9-4786-8fec-d7b3a12d545b-kube-api-access-v77r6\") pod \"barbican-operator-controller-manager-7f86f8796f-dj8qm\" (UID: \"f41ee80c-1ab9-4786-8fec-d7b3a12d545b\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.644132 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddtg4\" (UniqueName: \"kubernetes.io/projected/4747f67c-5dd8-415a-8ff5-c6b43e1142cf-kube-api-access-ddtg4\") pod \"cinder-operator-controller-manager-7478f7dbf9-cjt92\" (UID: \"4747f67c-5dd8-415a-8ff5-c6b43e1142cf\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.663628 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.734345 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.735683 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.740329 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-7l4wt" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.744677 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.745711 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.747452 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddtg4\" (UniqueName: \"kubernetes.io/projected/4747f67c-5dd8-415a-8ff5-c6b43e1142cf-kube-api-access-ddtg4\") pod \"cinder-operator-controller-manager-7478f7dbf9-cjt92\" (UID: \"4747f67c-5dd8-415a-8ff5-c6b43e1142cf\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.747515 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkpsl\" (UniqueName: \"kubernetes.io/projected/b29a79e7-07da-4c52-9798-e279092c28df-kube-api-access-zkpsl\") pod \"designate-operator-controller-manager-b45d7bf98-9nvdh\" (UID: \"b29a79e7-07da-4c52-9798-e279092c28df\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.747546 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v77r6\" (UniqueName: \"kubernetes.io/projected/f41ee80c-1ab9-4786-8fec-d7b3a12d545b-kube-api-access-v77r6\") pod \"barbican-operator-controller-manager-7f86f8796f-dj8qm\" (UID: \"f41ee80c-1ab9-4786-8fec-d7b3a12d545b\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.748416 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-mh9s6" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.769787 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.781956 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddtg4\" (UniqueName: \"kubernetes.io/projected/4747f67c-5dd8-415a-8ff5-c6b43e1142cf-kube-api-access-ddtg4\") pod \"cinder-operator-controller-manager-7478f7dbf9-cjt92\" (UID: \"4747f67c-5dd8-415a-8ff5-c6b43e1142cf\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.786759 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v77r6\" (UniqueName: \"kubernetes.io/projected/f41ee80c-1ab9-4786-8fec-d7b3a12d545b-kube-api-access-v77r6\") pod \"barbican-operator-controller-manager-7f86f8796f-dj8qm\" (UID: \"f41ee80c-1ab9-4786-8fec-d7b3a12d545b\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.809680 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.825717 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.833061 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.837847 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-pc62z" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.846721 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.846788 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.848003 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.849126 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkpsl\" (UniqueName: \"kubernetes.io/projected/b29a79e7-07da-4c52-9798-e279092c28df-kube-api-access-zkpsl\") pod \"designate-operator-controller-manager-b45d7bf98-9nvdh\" (UID: \"b29a79e7-07da-4c52-9798-e279092c28df\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.849237 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l77kw\" (UniqueName: \"kubernetes.io/projected/d20ac3bf-9cba-4074-962c-7ad7d7b17174-kube-api-access-l77kw\") pod \"glance-operator-controller-manager-78fdd796fd-tx7mn\" (UID: \"d20ac3bf-9cba-4074-962c-7ad7d7b17174\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.849279 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctnnn\" (UniqueName: \"kubernetes.io/projected/92cbecbc-09b7-4aa7-8511-dcc241d6b957-kube-api-access-ctnnn\") pod \"heat-operator-controller-manager-594c8c9d5d-9jqlp\" (UID: \"92cbecbc-09b7-4aa7-8511-dcc241d6b957\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.864008 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.864071 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.864106 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-dxgrn" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.866333 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="875a982e7db5cc44931d699a4c51480a5860a252ccb155a317028cb1da4c99e1" exitCode=0 Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.866395 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"875a982e7db5cc44931d699a4c51480a5860a252ccb155a317028cb1da4c99e1"} Jan 28 13:04:09 crc kubenswrapper[4848]: 
I0128 13:04:09.866437 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"549672e6f36f329f8d879da83cfb4972802790c5bf74c410e2275cf97e32bb6c"} Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.866458 4848 scope.go:117] "RemoveContainer" containerID="9b3d7bb96bb73c79bf1b8f4103851f2633f3719121e6446c4bbfd8ad6b1a1178" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.878479 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkpsl\" (UniqueName: \"kubernetes.io/projected/b29a79e7-07da-4c52-9798-e279092c28df-kube-api-access-zkpsl\") pod \"designate-operator-controller-manager-b45d7bf98-9nvdh\" (UID: \"b29a79e7-07da-4c52-9798-e279092c28df\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.892140 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.893386 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.896155 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-p5dd5" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.898425 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.905589 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.919035 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.953515 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.953908 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srgnj\" (UniqueName: \"kubernetes.io/projected/e535d212-7524-4da1-9905-87af2259c702-kube-api-access-srgnj\") pod \"horizon-operator-controller-manager-77d5c5b54f-g54sg\" (UID: \"e535d212-7524-4da1-9905-87af2259c702\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.954706 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l77kw\" (UniqueName: \"kubernetes.io/projected/d20ac3bf-9cba-4074-962c-7ad7d7b17174-kube-api-access-l77kw\") pod \"glance-operator-controller-manager-78fdd796fd-tx7mn\" (UID: \"d20ac3bf-9cba-4074-962c-7ad7d7b17174\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.954746 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctnnn\" (UniqueName: \"kubernetes.io/projected/92cbecbc-09b7-4aa7-8511-dcc241d6b957-kube-api-access-ctnnn\") pod \"heat-operator-controller-manager-594c8c9d5d-9jqlp\" (UID: \"92cbecbc-09b7-4aa7-8511-dcc241d6b957\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.954805 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.954840 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zrsm\" (UniqueName: \"kubernetes.io/projected/0a7152e1-cedd-465b-a186-9a241ca98141-kube-api-access-9zrsm\") pod \"ironic-operator-controller-manager-598f7747c9-hfnz7\" (UID: \"0a7152e1-cedd-465b-a186-9a241ca98141\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.954933 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xrjj\" (UniqueName: \"kubernetes.io/projected/fe2e05c6-72db-4981-8b56-dc2a620003f2-kube-api-access-6xrjj\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.978666 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k"] Jan 28 13:04:09 crc kubenswrapper[4848]: I0128 13:04:09.979808 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.008940 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-xqbnq" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.013533 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.014139 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctnnn\" (UniqueName: \"kubernetes.io/projected/92cbecbc-09b7-4aa7-8511-dcc241d6b957-kube-api-access-ctnnn\") pod \"heat-operator-controller-manager-594c8c9d5d-9jqlp\" (UID: \"92cbecbc-09b7-4aa7-8511-dcc241d6b957\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.028662 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l77kw\" (UniqueName: \"kubernetes.io/projected/d20ac3bf-9cba-4074-962c-7ad7d7b17174-kube-api-access-l77kw\") pod \"glance-operator-controller-manager-78fdd796fd-tx7mn\" (UID: \"d20ac3bf-9cba-4074-962c-7ad7d7b17174\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.057554 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.058619 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.064804 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-xmwgw" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.067302 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.067387 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zrsm\" (UniqueName: \"kubernetes.io/projected/0a7152e1-cedd-465b-a186-9a241ca98141-kube-api-access-9zrsm\") pod \"ironic-operator-controller-manager-598f7747c9-hfnz7\" (UID: \"0a7152e1-cedd-465b-a186-9a241ca98141\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.067539 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xrjj\" (UniqueName: \"kubernetes.io/projected/fe2e05c6-72db-4981-8b56-dc2a620003f2-kube-api-access-6xrjj\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.067636 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srgnj\" (UniqueName: 
\"kubernetes.io/projected/e535d212-7524-4da1-9905-87af2259c702-kube-api-access-srgnj\") pod \"horizon-operator-controller-manager-77d5c5b54f-g54sg\" (UID: \"e535d212-7524-4da1-9905-87af2259c702\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.067777 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htqgd\" (UniqueName: \"kubernetes.io/projected/82ac0cb8-c28c-4242-8aa5-817aaf35ea3e-kube-api-access-htqgd\") pod \"keystone-operator-controller-manager-b8b6d4659-xb97k\" (UID: \"82ac0cb8-c28c-4242-8aa5-817aaf35ea3e\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.068030 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:10 crc kubenswrapper[4848]: E0128 13:04:10.068625 4848 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:10 crc kubenswrapper[4848]: E0128 13:04:10.068676 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert podName:fe2e05c6-72db-4981-8b56-dc2a620003f2 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:10.568652485 +0000 UTC m=+1077.480869523 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert") pod "infra-operator-controller-manager-694cf4f878-gcj9g" (UID: "fe2e05c6-72db-4981-8b56-dc2a620003f2") : secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.101000 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.103042 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.116897 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xrjj\" (UniqueName: \"kubernetes.io/projected/fe2e05c6-72db-4981-8b56-dc2a620003f2-kube-api-access-6xrjj\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.117277 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-fvzq7" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.130077 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zrsm\" (UniqueName: \"kubernetes.io/projected/0a7152e1-cedd-465b-a186-9a241ca98141-kube-api-access-9zrsm\") pod \"ironic-operator-controller-manager-598f7747c9-hfnz7\" (UID: \"0a7152e1-cedd-465b-a186-9a241ca98141\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.130192 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srgnj\" (UniqueName: \"kubernetes.io/projected/e535d212-7524-4da1-9905-87af2259c702-kube-api-access-srgnj\") pod \"horizon-operator-controller-manager-77d5c5b54f-g54sg\" (UID: \"e535d212-7524-4da1-9905-87af2259c702\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.140395 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.164668 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.169725 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.171329 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.174787 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrq7z\" (UniqueName: \"kubernetes.io/projected/39a4178e-2251-4cc9-bc57-2b46a5902a3d-kube-api-access-vrq7z\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f\" (UID: \"39a4178e-2251-4cc9-bc57-2b46a5902a3d\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.174944 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htqgd\" (UniqueName: \"kubernetes.io/projected/82ac0cb8-c28c-4242-8aa5-817aaf35ea3e-kube-api-access-htqgd\") pod \"keystone-operator-controller-manager-b8b6d4659-xb97k\" (UID: \"82ac0cb8-c28c-4242-8aa5-817aaf35ea3e\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.174985 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bhlr\" (UniqueName: \"kubernetes.io/projected/dedfeb84-9e8b-46f8-ac8f-0c5a85380160-kube-api-access-2bhlr\") pod \"manila-operator-controller-manager-78c6999f6f-v6mn8\" (UID: \"dedfeb84-9e8b-46f8-ac8f-0c5a85380160\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.181466 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-4dtnh" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.210430 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.235983 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.257050 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.268617 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.269992 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.276229 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-sdrvv" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.278558 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrq7z\" (UniqueName: \"kubernetes.io/projected/39a4178e-2251-4cc9-bc57-2b46a5902a3d-kube-api-access-vrq7z\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f\" (UID: \"39a4178e-2251-4cc9-bc57-2b46a5902a3d\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.278789 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffjsv\" (UniqueName: \"kubernetes.io/projected/365e9359-c6e2-428c-8889-95a232bb3e34-kube-api-access-ffjsv\") pod \"neutron-operator-controller-manager-78d58447c5-qpthc\" (UID: \"365e9359-c6e2-428c-8889-95a232bb3e34\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.278897 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bhlr\" (UniqueName: \"kubernetes.io/projected/dedfeb84-9e8b-46f8-ac8f-0c5a85380160-kube-api-access-2bhlr\") pod \"manila-operator-controller-manager-78c6999f6f-v6mn8\" (UID: \"dedfeb84-9e8b-46f8-ac8f-0c5a85380160\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.280613 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htqgd\" (UniqueName: \"kubernetes.io/projected/82ac0cb8-c28c-4242-8aa5-817aaf35ea3e-kube-api-access-htqgd\") pod \"keystone-operator-controller-manager-b8b6d4659-xb97k\" (UID: \"82ac0cb8-c28c-4242-8aa5-817aaf35ea3e\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.308410 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrq7z\" (UniqueName: \"kubernetes.io/projected/39a4178e-2251-4cc9-bc57-2b46a5902a3d-kube-api-access-vrq7z\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f\" (UID: \"39a4178e-2251-4cc9-bc57-2b46a5902a3d\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.308932 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bhlr\" (UniqueName: \"kubernetes.io/projected/dedfeb84-9e8b-46f8-ac8f-0c5a85380160-kube-api-access-2bhlr\") pod \"manila-operator-controller-manager-78c6999f6f-v6mn8\" (UID: \"dedfeb84-9e8b-46f8-ac8f-0c5a85380160\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.314049 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.341381 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.373289 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.382599 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffjsv\" (UniqueName: \"kubernetes.io/projected/365e9359-c6e2-428c-8889-95a232bb3e34-kube-api-access-ffjsv\") pod \"neutron-operator-controller-manager-78d58447c5-qpthc\" (UID: \"365e9359-c6e2-428c-8889-95a232bb3e34\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.382872 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88r6b\" (UniqueName: \"kubernetes.io/projected/34fd263e-f69d-4cc8-a003-ccb6f12273a6-kube-api-access-88r6b\") pod \"nova-operator-controller-manager-7bdb645866-jwvlh\" (UID: \"34fd263e-f69d-4cc8-a003-ccb6f12273a6\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.435407 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffjsv\" (UniqueName: \"kubernetes.io/projected/365e9359-c6e2-428c-8889-95a232bb3e34-kube-api-access-ffjsv\") pod \"neutron-operator-controller-manager-78d58447c5-qpthc\" (UID: \"365e9359-c6e2-428c-8889-95a232bb3e34\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.436385 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.441422 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.446618 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-g8khv" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.451840 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.460894 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.463074 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.468594 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.469999 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.491499 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-9frbq" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.491721 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.495078 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-g24ht" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.495374 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dldjb\" (UniqueName: \"kubernetes.io/projected/8f0ab1f6-45a7-4731-b418-f9131c97217a-kube-api-access-dldjb\") pod \"octavia-operator-controller-manager-5f4cd88d46-mckcj\" (UID: \"8f0ab1f6-45a7-4731-b418-f9131c97217a\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.495591 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88r6b\" (UniqueName: \"kubernetes.io/projected/34fd263e-f69d-4cc8-a003-ccb6f12273a6-kube-api-access-88r6b\") pod \"nova-operator-controller-manager-7bdb645866-jwvlh\" (UID: \"34fd263e-f69d-4cc8-a003-ccb6f12273a6\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.537650 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.553684 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.557152 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.557879 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.560416 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-5bvrj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.567412 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.572279 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88r6b\" (UniqueName: \"kubernetes.io/projected/34fd263e-f69d-4cc8-a003-ccb6f12273a6-kube-api-access-88r6b\") pod \"nova-operator-controller-manager-7bdb645866-jwvlh\" (UID: \"34fd263e-f69d-4cc8-a003-ccb6f12273a6\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.596340 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.597465 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.597744 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.599062 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dldjb\" (UniqueName: \"kubernetes.io/projected/8f0ab1f6-45a7-4731-b418-f9131c97217a-kube-api-access-dldjb\") pod \"octavia-operator-controller-manager-5f4cd88d46-mckcj\" (UID: \"8f0ab1f6-45a7-4731-b418-f9131c97217a\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.599153 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.599194 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpqgw\" (UniqueName: \"kubernetes.io/projected/390dea01-5c38-4c87-98c2-32f655af4a62-kube-api-access-fpqgw\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.599266 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcsks\" (UniqueName: \"kubernetes.io/projected/2c9667bf-ec8d-4064-b52e-e5a0f55f09a3-kube-api-access-mcsks\") pod \"ovn-operator-controller-manager-6f75f45d54-csw7g\" (UID: \"2c9667bf-ec8d-4064-b52e-e5a0f55f09a3\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.599308 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:10 crc kubenswrapper[4848]: E0128 13:04:10.599495 4848 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:10 crc kubenswrapper[4848]: E0128 13:04:10.599549 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert podName:fe2e05c6-72db-4981-8b56-dc2a620003f2 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:11.599526244 +0000 UTC m=+1078.511743282 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert") pod "infra-operator-controller-manager-694cf4f878-gcj9g" (UID: "fe2e05c6-72db-4981-8b56-dc2a620003f2") : secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.601344 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-29t8g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.631266 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.649618 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.651884 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dldjb\" (UniqueName: \"kubernetes.io/projected/8f0ab1f6-45a7-4731-b418-f9131c97217a-kube-api-access-dldjb\") pod \"octavia-operator-controller-manager-5f4cd88d46-mckcj\" (UID: \"8f0ab1f6-45a7-4731-b418-f9131c97217a\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.666186 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.668939 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.675619 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-9mtd2" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.676124 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.680033 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.682852 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.684580 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.692918 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.703030 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpqgw\" (UniqueName: \"kubernetes.io/projected/390dea01-5c38-4c87-98c2-32f655af4a62-kube-api-access-fpqgw\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.703222 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcsks\" (UniqueName: \"kubernetes.io/projected/2c9667bf-ec8d-4064-b52e-e5a0f55f09a3-kube-api-access-mcsks\") pod \"ovn-operator-controller-manager-6f75f45d54-csw7g\" (UID: \"2c9667bf-ec8d-4064-b52e-e5a0f55f09a3\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.703310 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4nmn\" (UniqueName: \"kubernetes.io/projected/1dada58b-0b20-4d23-aa46-164beef54624-kube-api-access-q4nmn\") pod \"swift-operator-controller-manager-547cbdb99f-cxnsf\" (UID: \"1dada58b-0b20-4d23-aa46-164beef54624\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.703352 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.703387 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hmn4\" (UniqueName: \"kubernetes.io/projected/ee8c2e3c-2df5-43aa-b624-e82e4cff81fb-kube-api-access-8hmn4\") pod \"placement-operator-controller-manager-79d5ccc684-2g2qj\" (UID: \"ee8c2e3c-2df5-43aa-b624-e82e4cff81fb\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.704070 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-85p78" Jan 28 13:04:10 crc kubenswrapper[4848]: E0128 13:04:10.707986 4848 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:10 crc kubenswrapper[4848]: E0128 
13:04:10.708111 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert podName:390dea01-5c38-4c87-98c2-32f655af4a62 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:11.208076499 +0000 UTC m=+1078.120293537 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" (UID: "390dea01-5c38-4c87-98c2-32f655af4a62") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.711949 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.748067 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcsks\" (UniqueName: \"kubernetes.io/projected/2c9667bf-ec8d-4064-b52e-e5a0f55f09a3-kube-api-access-mcsks\") pod \"ovn-operator-controller-manager-6f75f45d54-csw7g\" (UID: \"2c9667bf-ec8d-4064-b52e-e5a0f55f09a3\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.754708 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpqgw\" (UniqueName: \"kubernetes.io/projected/390dea01-5c38-4c87-98c2-32f655af4a62-kube-api-access-fpqgw\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.764905 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.791503 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.792883 4848 util.go:30] "No sandbox for pod can be found. 
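
The nestedpendingoperations.go:348 errors above show the volume operation executor's gating: each volume key carries at most one in-flight or recently failed operation, and a failure records a "no retries permitted until" timestamp that rejects earlier attempts outright. A hypothetical sketch of that gate:

    package main

    import (
        "fmt"
        "time"
    )

    // pendingOps models per-volume operation gating: a failed operation
    // records the earliest instant a retry may start; attempts before that
    // are rejected without running.
    type pendingOps struct {
        notBefore map[string]time.Time
    }

    func (p *pendingOps) tryStart(volumeKey string, now time.Time) error {
        if t, ok := p.notBefore[volumeKey]; ok && now.Before(t) {
            return fmt.Errorf("no retries permitted until %s", t.Format(time.RFC3339))
        }
        delete(p.notBefore, volumeKey)
        return nil
    }

    func (p *pendingOps) markFailed(volumeKey string, now time.Time, backoff time.Duration) {
        p.notBefore[volumeKey] = now.Add(backoff)
    }

    func main() {
        p := &pendingOps{notBefore: map[string]time.Time{}}
        now := time.Now()
        p.markFailed("secret/390dea01-cert", now, 500*time.Millisecond)
        fmt.Println(p.tryStart("secret/390dea01-cert", now))                  // rejected: backoff pending
        fmt.Println(p.tryStart("secret/390dea01-cert", now.Add(time.Second))) // <nil>: retry allowed
    }
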
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.797660 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-ftp46" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.806786 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nkfs\" (UniqueName: \"kubernetes.io/projected/164ef38a-92cd-4442-8925-509ba68366ba-kube-api-access-2nkfs\") pod \"telemetry-operator-controller-manager-85cd9769bb-s8mg8\" (UID: \"164ef38a-92cd-4442-8925-509ba68366ba\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.807964 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hmn4\" (UniqueName: \"kubernetes.io/projected/ee8c2e3c-2df5-43aa-b624-e82e4cff81fb-kube-api-access-8hmn4\") pod \"placement-operator-controller-manager-79d5ccc684-2g2qj\" (UID: \"ee8c2e3c-2df5-43aa-b624-e82e4cff81fb\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.808016 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnhlg\" (UniqueName: \"kubernetes.io/projected/2801f0da-025c-46a4-a123-6e71c300b025-kube-api-access-dnhlg\") pod \"test-operator-controller-manager-69797bbcbd-5h8th\" (UID: \"2801f0da-025c-46a4-a123-6e71c300b025\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.808621 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4nmn\" (UniqueName: \"kubernetes.io/projected/1dada58b-0b20-4d23-aa46-164beef54624-kube-api-access-q4nmn\") pod \"swift-operator-controller-manager-547cbdb99f-cxnsf\" (UID: \"1dada58b-0b20-4d23-aa46-164beef54624\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.828894 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.837593 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hmn4\" (UniqueName: \"kubernetes.io/projected/ee8c2e3c-2df5-43aa-b624-e82e4cff81fb-kube-api-access-8hmn4\") pod \"placement-operator-controller-manager-79d5ccc684-2g2qj\" (UID: \"ee8c2e3c-2df5-43aa-b624-e82e4cff81fb\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.853560 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4nmn\" (UniqueName: \"kubernetes.io/projected/1dada58b-0b20-4d23-aa46-164beef54624-kube-api-access-q4nmn\") pod \"swift-operator-controller-manager-547cbdb99f-cxnsf\" (UID: \"1dada58b-0b20-4d23-aa46-164beef54624\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:10 crc kubenswrapper[4848]: W0128 13:04:10.863548 4848 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4747f67c_5dd8_415a_8ff5_c6b43e1142cf.slice/crio-7e41ea326c13c71801536de4026447018d088585546934ce837c751196ef9971 WatchSource:0}: Error finding container 7e41ea326c13c71801536de4026447018d088585546934ce837c751196ef9971: Status 404 returned error can't find the container with id 7e41ea326c13c71801536de4026447018d088585546934ce837c751196ef9971 Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.888334 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.889364 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.892847 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.895236 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-z6n5j" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.895535 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.895718 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.910349 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnhlg\" (UniqueName: \"kubernetes.io/projected/2801f0da-025c-46a4-a123-6e71c300b025-kube-api-access-dnhlg\") pod \"test-operator-controller-manager-69797bbcbd-5h8th\" (UID: \"2801f0da-025c-46a4-a123-6e71c300b025\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.910543 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t8x4\" (UniqueName: \"kubernetes.io/projected/0593b76f-9225-457e-9c0f-186dc73f37a3-kube-api-access-7t8x4\") pod \"watcher-operator-controller-manager-59c5775db7-r2ppl\" (UID: \"0593b76f-9225-457e-9c0f-186dc73f37a3\") " pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.910606 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nkfs\" (UniqueName: \"kubernetes.io/projected/164ef38a-92cd-4442-8925-509ba68366ba-kube-api-access-2nkfs\") pod \"telemetry-operator-controller-manager-85cd9769bb-s8mg8\" (UID: \"164ef38a-92cd-4442-8925-509ba68366ba\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.912183 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86"] Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.913675 4848 util.go:30] "No sandbox for pod can be found. 
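
The manager.go:1169 warnings come from cAdvisor racing the container runtime: a cgroup watch event fires for a freshly created crio-<id> slice before the runtime can report that container, the lookup returns a 404, and the event is dropped as a warning; the same containers show up normally moments later in the "SyncLoop (PLEG)" ContainerStarted entries. A sketch of the benign race with hypothetical types:

    package main

    import "fmt"

    // handleWatchEvent models cAdvisor processing a cgroup creation event: if
    // the runtime does not yet know the container id, the event fails with a
    // not-found error and is logged as a warning rather than retried.
    func handleWatchEvent(containerID string, runtimeKnown map[string]bool) error {
        if !runtimeKnown[containerID] {
            return fmt.Errorf("can't find the container with id %s", containerID)
        }
        return nil
    }

    func main() {
        known := map[string]bool{}
        id := "7e41ea326c13" // id shortened for the sketch
        fmt.Println("Failed to process watch event:", handleWatchEvent(id, known))
        known[id] = true // the runtime registers the container a moment later
        fmt.Println("second attempt:", handleWatchEvent(id, known))
    }
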
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.921011 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-x8vnw" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.921050 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" event={"ID":"4747f67c-5dd8-415a-8ff5-c6b43e1142cf","Type":"ContainerStarted","Data":"7e41ea326c13c71801536de4026447018d088585546934ce837c751196ef9971"} Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.921892 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.940233 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nkfs\" (UniqueName: \"kubernetes.io/projected/164ef38a-92cd-4442-8925-509ba68366ba-kube-api-access-2nkfs\") pod \"telemetry-operator-controller-manager-85cd9769bb-s8mg8\" (UID: \"164ef38a-92cd-4442-8925-509ba68366ba\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.940451 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnhlg\" (UniqueName: \"kubernetes.io/projected/2801f0da-025c-46a4-a123-6e71c300b025-kube-api-access-dnhlg\") pod \"test-operator-controller-manager-69797bbcbd-5h8th\" (UID: \"2801f0da-025c-46a4-a123-6e71c300b025\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:10 crc kubenswrapper[4848]: I0128 13:04:10.977316 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.015063 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnst7\" (UniqueName: \"kubernetes.io/projected/04f3b1d4-2f58-42d7-962c-d7a940b93469-kube-api-access-qnst7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-8mp86\" (UID: \"04f3b1d4-2f58-42d7-962c-d7a940b93469\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.015128 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7j2m\" (UniqueName: \"kubernetes.io/projected/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-kube-api-access-j7j2m\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.016263 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t8x4\" (UniqueName: \"kubernetes.io/projected/0593b76f-9225-457e-9c0f-186dc73f37a3-kube-api-access-7t8x4\") pod \"watcher-operator-controller-manager-59c5775db7-r2ppl\" (UID: \"0593b76f-9225-457e-9c0f-186dc73f37a3\") " pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.017006 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.017159 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.026068 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.050489 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t8x4\" (UniqueName: \"kubernetes.io/projected/0593b76f-9225-457e-9c0f-186dc73f37a3-kube-api-access-7t8x4\") pod \"watcher-operator-controller-manager-59c5775db7-r2ppl\" (UID: \"0593b76f-9225-457e-9c0f-186dc73f37a3\") " pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.072841 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.085402 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.121878 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.121941 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7j2m\" (UniqueName: \"kubernetes.io/projected/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-kube-api-access-j7j2m\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.121967 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnst7\" (UniqueName: \"kubernetes.io/projected/04f3b1d4-2f58-42d7-962c-d7a940b93469-kube-api-access-qnst7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-8mp86\" (UID: \"04f3b1d4-2f58-42d7-962c-d7a940b93469\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.122067 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " 
pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.122197 4848 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.122327 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:11.622286803 +0000 UTC m=+1078.534503841 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "metrics-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.122591 4848 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.122664 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:11.622609961 +0000 UTC m=+1078.534826999 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.125674 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.129345 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.196353 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.220065 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.225967 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnst7\" (UniqueName: \"kubernetes.io/projected/04f3b1d4-2f58-42d7-962c-d7a940b93469-kube-api-access-qnst7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-8mp86\" (UID: \"04f3b1d4-2f58-42d7-962c-d7a940b93469\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.228341 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7j2m\" (UniqueName: \"kubernetes.io/projected/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-kube-api-access-j7j2m\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.237748 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.253444 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.254677 4848 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.275471 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert podName:390dea01-5c38-4c87-98c2-32f655af4a62 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:12.275431775 +0000 UTC m=+1079.187648813 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" (UID: "390dea01-5c38-4c87-98c2-32f655af4a62") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.271460 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.275975 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.283589 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.320617 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7"] Jan 28 13:04:11 crc kubenswrapper[4848]: W0128 13:04:11.331454 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd20ac3bf_9cba_4074_962c_7ad7d7b17174.slice/crio-c442ddb4f0250f5bf3b5d32bb6879e8a6b5c94e6b880cf21d4c37031aa5c3186 WatchSource:0}: Error finding container c442ddb4f0250f5bf3b5d32bb6879e8a6b5c94e6b880cf21d4c37031aa5c3186: Status 404 returned error can't find the container with id c442ddb4f0250f5bf3b5d32bb6879e8a6b5c94e6b880cf21d4c37031aa5c3186 Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.352042 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.584272 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.596346 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.660023 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.675063 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.675213 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.675280 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.675453 4848 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.675518 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:12.675499552 +0000 UTC m=+1079.587716590 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "metrics-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.675598 4848 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.675714 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert podName:fe2e05c6-72db-4981-8b56-dc2a620003f2 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:13.675684727 +0000 UTC m=+1080.587901815 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert") pod "infra-operator-controller-manager-694cf4f878-gcj9g" (UID: "fe2e05c6-72db-4981-8b56-dc2a620003f2") : secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.675778 4848 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: E0128 13:04:11.675808 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:12.67579942 +0000 UTC m=+1079.588016548 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "webhook-server-cert" not found Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.751365 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k"] Jan 28 13:04:11 crc kubenswrapper[4848]: W0128 13:04:11.857883 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f0ab1f6_45a7_4731_b418_f9131c97217a.slice/crio-2ff0721f5f67c7b4189817e5ecf325362cde446c261ebe043849ea361614198e WatchSource:0}: Error finding container 2ff0721f5f67c7b4189817e5ecf325362cde446c261ebe043849ea361614198e: Status 404 returned error can't find the container with id 2ff0721f5f67c7b4189817e5ecf325362cde446c261ebe043849ea361614198e Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.859943 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc"] Jan 28 13:04:11 crc kubenswrapper[4848]: W0128 13:04:11.868575 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod365e9359_c6e2_428c_8889_95a232bb3e34.slice/crio-25c1dc995e9db128ac48eb751ab59089c8e2b0211ca6c44adb632792de63fb4a WatchSource:0}: Error finding container 25c1dc995e9db128ac48eb751ab59089c8e2b0211ca6c44adb632792de63fb4a: Status 404 returned error can't find the container with id 25c1dc995e9db128ac48eb751ab59089c8e2b0211ca6c44adb632792de63fb4a Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.879049 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj"] Jan 28 13:04:11 crc kubenswrapper[4848]: W0128 13:04:11.883103 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34fd263e_f69d_4cc8_a003_ccb6f12273a6.slice/crio-5cabf6efd8277a3c98159416b70848d37cd88565e9d17ec9f156e0121604e468 WatchSource:0}: Error finding container 5cabf6efd8277a3c98159416b70848d37cd88565e9d17ec9f156e0121604e468: Status 404 returned error can't find the container with id 5cabf6efd8277a3c98159416b70848d37cd88565e9d17ec9f156e0121604e468 Jan 28 13:04:11 crc kubenswrapper[4848]: W0128 13:04:11.895296 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c9667bf_ec8d_4064_b52e_e5a0f55f09a3.slice/crio-2d5e838d1fb5b4e1b38b15f60b3bc1cc4de7851769546b63a57918ffe919d725 WatchSource:0}: Error finding container 2d5e838d1fb5b4e1b38b15f60b3bc1cc4de7851769546b63a57918ffe919d725: Status 404 returned error can't find the container with id 2d5e838d1fb5b4e1b38b15f60b3bc1cc4de7851769546b63a57918ffe919d725 Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.899617 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.911044 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.965743 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf"] Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.975722 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" event={"ID":"dedfeb84-9e8b-46f8-ac8f-0c5a85380160","Type":"ContainerStarted","Data":"6730efd0ccb23591df09ddc4c9bec247b66f7824c0143886984141dee50079c6"} Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.981051 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" event={"ID":"d20ac3bf-9cba-4074-962c-7ad7d7b17174","Type":"ContainerStarted","Data":"c442ddb4f0250f5bf3b5d32bb6879e8a6b5c94e6b880cf21d4c37031aa5c3186"} Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.982004 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" event={"ID":"92cbecbc-09b7-4aa7-8511-dcc241d6b957","Type":"ContainerStarted","Data":"886bca7183ed5a79438f099a868ada1449bedc85e644639a648c8525f05f1529"} Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.987502 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" event={"ID":"f41ee80c-1ab9-4786-8fec-d7b3a12d545b","Type":"ContainerStarted","Data":"83926f6d6b9020748dc9311f851a28f0d560961e5df657ac2adb5a06fc20a823"} Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.990103 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" event={"ID":"365e9359-c6e2-428c-8889-95a232bb3e34","Type":"ContainerStarted","Data":"25c1dc995e9db128ac48eb751ab59089c8e2b0211ca6c44adb632792de63fb4a"} Jan 28 13:04:11 crc kubenswrapper[4848]: I0128 13:04:11.991706 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" event={"ID":"34fd263e-f69d-4cc8-a003-ccb6f12273a6","Type":"ContainerStarted","Data":"5cabf6efd8277a3c98159416b70848d37cd88565e9d17ec9f156e0121604e468"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.016995 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" event={"ID":"8f0ab1f6-45a7-4731-b418-f9131c97217a","Type":"ContainerStarted","Data":"2ff0721f5f67c7b4189817e5ecf325362cde446c261ebe043849ea361614198e"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.030488 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" event={"ID":"0a7152e1-cedd-465b-a186-9a241ca98141","Type":"ContainerStarted","Data":"63e4bc3646365be4d26b466af192a844c0e68a86ef101c3d4b82917515ec554a"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.039005 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" event={"ID":"e535d212-7524-4da1-9905-87af2259c702","Type":"ContainerStarted","Data":"02849c0e6e33c897549f5474498999943bb045ff718381231c4f041e07d2e848"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.041387 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" 
event={"ID":"39a4178e-2251-4cc9-bc57-2b46a5902a3d","Type":"ContainerStarted","Data":"41f40fdc31797eec0987640bdc8e398dc2b568a83ece1f5529a1892d5e35ef95"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.042910 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" event={"ID":"2c9667bf-ec8d-4064-b52e-e5a0f55f09a3","Type":"ContainerStarted","Data":"2d5e838d1fb5b4e1b38b15f60b3bc1cc4de7851769546b63a57918ffe919d725"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.044565 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" event={"ID":"b29a79e7-07da-4c52-9798-e279092c28df","Type":"ContainerStarted","Data":"b876bfeb9ba205f4f26f49dda343ed180274564e3372bae04beef3eabac39399"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.046645 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" event={"ID":"82ac0cb8-c28c-4242-8aa5-817aaf35ea3e","Type":"ContainerStarted","Data":"67f59b088c608f192dc1a43ce8f65d3ef79344b3d254d0d02ff38584ee364ce9"} Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.078562 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj"] Jan 28 13:04:12 crc kubenswrapper[4848]: W0128 13:04:12.104361 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee8c2e3c_2df5_43aa_b624_e82e4cff81fb.slice/crio-5e6ae342e79cfaaba1f44ede5e4379e3a684ecef921f454632995a13e89919e4 WatchSource:0}: Error finding container 5e6ae342e79cfaaba1f44ede5e4379e3a684ecef921f454632995a13e89919e4: Status 404 returned error can't find the container with id 5e6ae342e79cfaaba1f44ede5e4379e3a684ecef921f454632995a13e89919e4 Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.113680 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8hmn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-79d5ccc684-2g2qj_openstack-operators(ee8c2e3c-2df5-43aa-b624-e82e4cff81fb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.115430 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" podUID="ee8c2e3c-2df5-43aa-b624-e82e4cff81fb" Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.163603 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8"] Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.262426 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86"] Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.273014 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
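Annotation: the "ErrImagePull: pull QPS exceeded" entries here are not registry errors. The kubelet rate-limits its own image pulls, governed by the registryPullQPS and registryBurst fields of the kubelet configuration (defaults 5 pulls/s, burst 10), so when roughly twenty operator pods are scheduled at once, as above, the pulls that miss the burst window are rejected immediately and retried under backoff. The gate behaves like a token bucket; a small sketch with client-go's flowcontrol package, using default-like constants (an illustration, not the kubelet's actual code path):

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Token bucket comparable to the kubelet defaults: registryPullQPS=5, registryBurst=10.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5.0, 10)

	// Simulate ~20 operator pods all requesting an image pull in the same instant.
	for i := 1; i <= 20; i++ {
		if limiter.TryAccept() {
			fmt.Printf("pull %2d admitted\n", i)
		} else {
			// The condition the kubelet reports above as "pull QPS exceeded".
			fmt.Printf("pull %2d rejected: pull QPS exceeded\n", i)
		}
	}
}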
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.163603 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8"]
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.262426 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86"]
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.273014 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qnst7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-8mp86_openstack-operators(04f3b1d4-2f58-42d7-962c-d7a940b93469): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.274526 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th"]
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.274677 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" podUID="04f3b1d4-2f58-42d7-962c-d7a940b93469"
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.287987 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6"
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.288303 4848 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.288487 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert podName:390dea01-5c38-4c87-98c2-32f655af4a62 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:14.288410162 +0000 UTC m=+1081.200627360 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" (UID: "390dea01-5c38-4c87-98c2-32f655af4a62") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.316609 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl"]
Jan 28 13:04:12 crc kubenswrapper[4848]: W0128 13:04:12.340616 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2801f0da_025c_46a4_a123_6e71c300b025.slice/crio-de31bc9b326b97b864391816869c11dfc4db02f1740721ad7bc344df6c7f53f9 WatchSource:0}: Error finding container de31bc9b326b97b864391816869c11dfc4db02f1740721ad7bc344df6c7f53f9: Status 404 returned error can't find the container with id de31bc9b326b97b864391816869c11dfc4db02f1740721ad7bc344df6c7f53f9
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.342964 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dnhlg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-5h8th_openstack-operators(2801f0da-025c-46a4-a123-6e71c300b025): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.344401 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" podUID="2801f0da-025c-46a4-a123-6e71c300b025"
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.694768 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp"
Jan 28 13:04:12 crc kubenswrapper[4848]: I0128 13:04:12.694849 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp"
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.695110 4848 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.695110 4848 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.695313 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:14.695203693 +0000 UTC m=+1081.607420911 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "webhook-server-cert" not found
Jan 28 13:04:12 crc kubenswrapper[4848]: E0128 13:04:12.695343 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:14.695329216 +0000 UTC m=+1081.607546464 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "metrics-server-cert" not found
Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.062836 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" event={"ID":"1dada58b-0b20-4d23-aa46-164beef54624","Type":"ContainerStarted","Data":"788d9cedf5014467a0218366045cb1e16c44f66f72d52be40fc7c82619baea43"}
Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.065629 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" event={"ID":"164ef38a-92cd-4442-8925-509ba68366ba","Type":"ContainerStarted","Data":"b5d067696dd7d1b392293f96f69ba5dc46cc31f0f60acc83472f1439c011a3b4"}
Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.067786 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" event={"ID":"04f3b1d4-2f58-42d7-962c-d7a940b93469","Type":"ContainerStarted","Data":"d255bacd18a4f46d3fcb01350e01d874ea8f8c5801feea886519a87cf9a0d580"}
Jan 28 13:04:13 crc kubenswrapper[4848]: E0128 13:04:13.070891 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" podUID="04f3b1d4-2f58-42d7-962c-d7a940b93469"
Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.107495 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" event={"ID":"ee8c2e3c-2df5-43aa-b624-e82e4cff81fb","Type":"ContainerStarted","Data":"5e6ae342e79cfaaba1f44ede5e4379e3a684ecef921f454632995a13e89919e4"}
Jan 28 13:04:13 crc kubenswrapper[4848]: E0128 13:04:13.110788 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" podUID="ee8c2e3c-2df5-43aa-b624-e82e4cff81fb"
Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.111292 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" event={"ID":"0593b76f-9225-457e-9c0f-186dc73f37a3","Type":"ContainerStarted","Data":"491e015daee4857d7b00c3b9610a014991503b62fa1a1c6d565788204c33cb16"}
Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.115559 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" event={"ID":"2801f0da-025c-46a4-a123-6e71c300b025","Type":"ContainerStarted","Data":"de31bc9b326b97b864391816869c11dfc4db02f1740721ad7bc344df6c7f53f9"}
Jan 28 13:04:13 crc kubenswrapper[4848]: E0128 13:04:13.118542 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" podUID="2801f0da-025c-46a4-a123-6e71c300b025"
pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" podUID="2801f0da-025c-46a4-a123-6e71c300b025" Jan 28 13:04:13 crc kubenswrapper[4848]: I0128 13:04:13.733598 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:13 crc kubenswrapper[4848]: E0128 13:04:13.735498 4848 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:13 crc kubenswrapper[4848]: E0128 13:04:13.735672 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert podName:fe2e05c6-72db-4981-8b56-dc2a620003f2 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:17.73563798 +0000 UTC m=+1084.647855018 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert") pod "infra-operator-controller-manager-694cf4f878-gcj9g" (UID: "fe2e05c6-72db-4981-8b56-dc2a620003f2") : secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.144864 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" podUID="2801f0da-025c-46a4-a123-6e71c300b025" Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.156109 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" podUID="ee8c2e3c-2df5-43aa-b624-e82e4cff81fb" Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.159318 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" podUID="04f3b1d4-2f58-42d7-962c-d7a940b93469" Jan 28 13:04:14 crc kubenswrapper[4848]: I0128 13:04:14.346687 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.346811 4848 secret.go:188] Couldn't get secret 
openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.346900 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert podName:390dea01-5c38-4c87-98c2-32f655af4a62 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:18.346880954 +0000 UTC m=+1085.259097992 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" (UID: "390dea01-5c38-4c87-98c2-32f655af4a62") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:14 crc kubenswrapper[4848]: I0128 13:04:14.757734 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:14 crc kubenswrapper[4848]: I0128 13:04:14.757803 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.757945 4848 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.758063 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:18.758038073 +0000 UTC m=+1085.670255101 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "webhook-server-cert" not found Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.758058 4848 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 13:04:14 crc kubenswrapper[4848]: E0128 13:04:14.758163 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:18.758135767 +0000 UTC m=+1085.670352805 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "metrics-server-cert" not found Jan 28 13:04:17 crc kubenswrapper[4848]: I0128 13:04:17.823889 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:17 crc kubenswrapper[4848]: E0128 13:04:17.824135 4848 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:17 crc kubenswrapper[4848]: E0128 13:04:17.824810 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert podName:fe2e05c6-72db-4981-8b56-dc2a620003f2 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:25.824787174 +0000 UTC m=+1092.737004212 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert") pod "infra-operator-controller-manager-694cf4f878-gcj9g" (UID: "fe2e05c6-72db-4981-8b56-dc2a620003f2") : secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:18 crc kubenswrapper[4848]: I0128 13:04:18.433824 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:18 crc kubenswrapper[4848]: E0128 13:04:18.434081 4848 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:18 crc kubenswrapper[4848]: E0128 13:04:18.434145 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert podName:390dea01-5c38-4c87-98c2-32f655af4a62 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:26.434127237 +0000 UTC m=+1093.346344275 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" (UID: "390dea01-5c38-4c87-98c2-32f655af4a62") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:18 crc kubenswrapper[4848]: I0128 13:04:18.840422 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:18 crc kubenswrapper[4848]: I0128 13:04:18.840920 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:18 crc kubenswrapper[4848]: E0128 13:04:18.841174 4848 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 13:04:18 crc kubenswrapper[4848]: E0128 13:04:18.841238 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:26.841222605 +0000 UTC m=+1093.753439643 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "metrics-server-cert" not found Jan 28 13:04:18 crc kubenswrapper[4848]: E0128 13:04:18.841335 4848 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 13:04:18 crc kubenswrapper[4848]: E0128 13:04:18.841456 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:26.841436711 +0000 UTC m=+1093.753653779 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "webhook-server-cert" not found Jan 28 13:04:25 crc kubenswrapper[4848]: I0128 13:04:25.876273 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:25 crc kubenswrapper[4848]: E0128 13:04:25.876300 4848 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:25 crc kubenswrapper[4848]: E0128 13:04:25.877270 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert podName:fe2e05c6-72db-4981-8b56-dc2a620003f2 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:41.877226056 +0000 UTC m=+1108.789443094 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert") pod "infra-operator-controller-manager-694cf4f878-gcj9g" (UID: "fe2e05c6-72db-4981-8b56-dc2a620003f2") : secret "infra-operator-webhook-server-cert" not found Jan 28 13:04:26 crc kubenswrapper[4848]: I0128 13:04:26.486063 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:26 crc kubenswrapper[4848]: E0128 13:04:26.486328 4848 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:26 crc kubenswrapper[4848]: E0128 13:04:26.486411 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert podName:390dea01-5c38-4c87-98c2-32f655af4a62 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:42.486389694 +0000 UTC m=+1109.398606732 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" (UID: "390dea01-5c38-4c87-98c2-32f655af4a62") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 13:04:26 crc kubenswrapper[4848]: I0128 13:04:26.892493 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:26 crc kubenswrapper[4848]: I0128 13:04:26.892552 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:26 crc kubenswrapper[4848]: E0128 13:04:26.892680 4848 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 13:04:26 crc kubenswrapper[4848]: E0128 13:04:26.892735 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:42.892719331 +0000 UTC m=+1109.804936369 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "metrics-server-cert" not found Jan 28 13:04:26 crc kubenswrapper[4848]: E0128 13:04:26.892749 4848 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 13:04:26 crc kubenswrapper[4848]: E0128 13:04:26.892940 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs podName:ef39eedb-8ccb-47f4-af2c-faee2565e2c9 nodeName:}" failed. No retries permitted until 2026-01-28 13:04:42.892920817 +0000 UTC m=+1109.805137855 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs") pod "openstack-operator-controller-manager-6b67879f4f-c5rbp" (UID: "ef39eedb-8ccb-47f4-af2c-faee2565e2c9") : secret "webhook-server-cert" not found Jan 28 13:04:33 crc kubenswrapper[4848]: E0128 13:04:33.509823 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658" Jan 28 13:04:33 crc kubenswrapper[4848]: E0128 13:04:33.511578 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-88r6b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-7bdb645866-jwvlh_openstack-operators(34fd263e-f69d-4cc8-a003-ccb6f12273a6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:33 crc kubenswrapper[4848]: E0128 13:04:33.512879 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" 
podUID="34fd263e-f69d-4cc8-a003-ccb6f12273a6" Jan 28 13:04:34 crc kubenswrapper[4848]: E0128 13:04:34.335375 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658\\\"\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" podUID="34fd263e-f69d-4cc8-a003-ccb6f12273a6" Jan 28 13:04:34 crc kubenswrapper[4848]: E0128 13:04:34.481714 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e" Jan 28 13:04:34 crc kubenswrapper[4848]: E0128 13:04:34.481962 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ffjsv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-78d58447c5-qpthc_openstack-operators(365e9359-c6e2-428c-8889-95a232bb3e34): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:34 crc kubenswrapper[4848]: E0128 13:04:34.483208 4848 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" podUID="365e9359-c6e2-428c-8889-95a232bb3e34" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.208067 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.208325 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2nkfs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-85cd9769bb-s8mg8_openstack-operators(164ef38a-92cd-4442-8925-509ba68366ba): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.209643 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" 
podUID="164ef38a-92cd-4442-8925-509ba68366ba" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.343172 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" podUID="365e9359-c6e2-428c-8889-95a232bb3e34" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.343266 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" podUID="164ef38a-92cd-4442-8925-509ba68366ba" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.891817 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.892157 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zkpsl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-b45d7bf98-9nvdh_openstack-operators(b29a79e7-07da-4c52-9798-e279092c28df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:35 crc kubenswrapper[4848]: E0128 13:04:35.894868 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" podUID="b29a79e7-07da-4c52-9798-e279092c28df" Jan 28 13:04:36 crc kubenswrapper[4848]: E0128 13:04:36.364137 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece\\\"\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" podUID="b29a79e7-07da-4c52-9798-e279092c28df" Jan 28 13:04:36 crc kubenswrapper[4848]: E0128 13:04:36.745868 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:c94116e32fb9af850accd9d7ae46765559eef3fbe2ba75472c1c1ac91b2c33fd" Jan 28 13:04:36 crc kubenswrapper[4848]: E0128 13:04:36.746115 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:c94116e32fb9af850accd9d7ae46765559eef3fbe2ba75472c1c1ac91b2c33fd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v77r6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7f86f8796f-dj8qm_openstack-operators(f41ee80c-1ab9-4786-8fec-d7b3a12d545b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:36 crc kubenswrapper[4848]: E0128 13:04:36.747367 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" podUID="f41ee80c-1ab9-4786-8fec-d7b3a12d545b" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.325031 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:b916c87806b7eadd83e0ca890c3c24fb990fc5beb48ddc4537e3384efd3e62f7" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.325280 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:b916c87806b7eadd83e0ca890c3c24fb990fc5beb48ddc4537e3384efd3e62f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ddtg4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-7478f7dbf9-cjt92_openstack-operators(4747f67c-5dd8-415a-8ff5-c6b43e1142cf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.326953 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" podUID="4747f67c-5dd8-415a-8ff5-c6b43e1142cf" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.372774 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:c94116e32fb9af850accd9d7ae46765559eef3fbe2ba75472c1c1ac91b2c33fd\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" podUID="f41ee80c-1ab9-4786-8fec-d7b3a12d545b" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.374129 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:b916c87806b7eadd83e0ca890c3c24fb990fc5beb48ddc4537e3384efd3e62f7\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" podUID="4747f67c-5dd8-415a-8ff5-c6b43e1142cf" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.408945 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/openstack-k8s-operators/watcher-operator:c8423aabba1b2a4414a2ef6281a82968f03d97d8" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.409010 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/openstack-k8s-operators/watcher-operator:c8423aabba1b2a4414a2ef6281a82968f03d97d8" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.409168 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:38.102.83.20:5001/openstack-k8s-operators/watcher-operator:c8423aabba1b2a4414a2ef6281a82968f03d97d8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7t8x4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-59c5775db7-r2ppl_openstack-operators(0593b76f-9225-457e-9c0f-186dc73f37a3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.410780 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" podUID="0593b76f-9225-457e-9c0f-186dc73f37a3" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.980485 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.980855 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-htqgd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-xb97k_openstack-operators(82ac0cb8-c28c-4242-8aa5-817aaf35ea3e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:04:37 crc kubenswrapper[4848]: E0128 13:04:37.982195 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" podUID="82ac0cb8-c28c-4242-8aa5-817aaf35ea3e" Jan 28 13:04:38 crc kubenswrapper[4848]: E0128 13:04:38.381296 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" podUID="82ac0cb8-c28c-4242-8aa5-817aaf35ea3e" Jan 28 13:04:38 crc kubenswrapper[4848]: E0128 13:04:38.382047 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/openstack-k8s-operators/watcher-operator:c8423aabba1b2a4414a2ef6281a82968f03d97d8\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" podUID="0593b76f-9225-457e-9c0f-186dc73f37a3" 
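The five failures above share one shape: the CRI image pull is canceled mid-transfer ("copying config: context canceled"), kuberuntime surfaces it as ErrImagePull, and subsequent pod syncs are rejected with ImagePullBackOff until the pull is retried and, as the ContainerStarted events further down show, eventually succeeds. The sketch below illustrates the doubling back-off bookkeeping behind those ImagePullBackOff records; it is a minimal stand-in written for this note, not kubelet's code, and the 10s initial / 5m cap constants are assumptions (in the ballpark of kubelet's defaults).

package main

import (
	"fmt"
	"time"
)

// pullBackoff tracks per-image back-off state, loosely modeled on the
// doubling back-off kubelet applies between failed image pulls.
// Names and constants are illustrative, not kubelet's own.
type pullBackoff struct {
	initial time.Duration
	max     time.Duration
	next    map[string]time.Time     // earliest time a retry is allowed
	delay   map[string]time.Duration // current back-off window
}

func newPullBackoff(initial, max time.Duration) *pullBackoff {
	return &pullBackoff{
		initial: initial,
		max:     max,
		next:    map[string]time.Time{},
		delay:   map[string]time.Duration{},
	}
}

// shouldRetry reports whether a pull of image may be attempted at now.
// While it returns false, a caller would report ImagePullBackOff.
func (b *pullBackoff) shouldRetry(image string, now time.Time) bool {
	return !now.Before(b.next[image])
}

// recordFailure doubles the back-off window (capped at max) after an ErrImagePull.
func (b *pullBackoff) recordFailure(image string, now time.Time) {
	d := b.delay[image]
	if d == 0 {
		d = b.initial
	} else {
		d *= 2
		if d > b.max {
			d = b.max
		}
	}
	b.delay[image] = d
	b.next[image] = now.Add(d)
}

func main() {
	b := newPullBackoff(10*time.Second, 5*time.Minute)
	img := "quay.io/openstack-k8s-operators/designate-operator@sha256:6c88..." // truncated, illustrative
	now := time.Now()
	for i := 1; i <= 4; i++ {
		b.recordFailure(img, now)
		fmt.Printf("failure %d: back-off window now %v\n", i, b.delay[img])
	}
	fmt.Println("retry allowed immediately?", b.shouldRetry(img, now)) // false: inside the window
}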
Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.411417 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" event={"ID":"e535d212-7524-4da1-9905-87af2259c702","Type":"ContainerStarted","Data":"0b7b37e8436cfa8ca41e8272da5da0e2855a5c359935a33b1f2c26ba4c38334f"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.412205 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.427782 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" event={"ID":"2c9667bf-ec8d-4064-b52e-e5a0f55f09a3","Type":"ContainerStarted","Data":"48642475e0c8d5fea3e684d751dfe2228bbce6cb2782596d82264ca6fbc688fa"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.428605 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.439503 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" podStartSLOduration=4.984630623 podStartE2EDuration="32.439484517s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.621695252 +0000 UTC m=+1078.533912290" lastFinishedPulling="2026-01-28 13:04:39.076549146 +0000 UTC m=+1105.988766184" observedRunningTime="2026-01-28 13:04:41.435885408 +0000 UTC m=+1108.348102446" watchObservedRunningTime="2026-01-28 13:04:41.439484517 +0000 UTC m=+1108.351701555" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.447656 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" event={"ID":"ee8c2e3c-2df5-43aa-b624-e82e4cff81fb","Type":"ContainerStarted","Data":"0d05255800b042fc986a8069b948bcf3878529d71ee7af7255a469aea3502ed0"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.448554 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.454503 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" event={"ID":"8f0ab1f6-45a7-4731-b418-f9131c97217a","Type":"ContainerStarted","Data":"4c3d9c792af7a5dfc750c77fcd35269221726a36a0cb62cbdbf158253f6a29c2"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.455391 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.458836 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" event={"ID":"2801f0da-025c-46a4-a123-6e71c300b025","Type":"ContainerStarted","Data":"ea511636dd5fe3d6703fffa8e0e3aca7ed24d3d29e2428b99095a0dba7d532a7"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.459163 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.463090 4848 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" event={"ID":"1dada58b-0b20-4d23-aa46-164beef54624","Type":"ContainerStarted","Data":"c9ae553795269b64074fd3d2c5c9b9ece03f7ef55d0e38caa07868e91b66995b"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.463394 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.475708 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" event={"ID":"dedfeb84-9e8b-46f8-ac8f-0c5a85380160","Type":"ContainerStarted","Data":"43257c1c221ca2e2b2ffc6f26f425bd836c9e879d0360135f3aea38d66724ce7"} Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.475980 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.478847 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" podStartSLOduration=5.306673537 podStartE2EDuration="32.478829629s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.904412294 +0000 UTC m=+1078.816629332" lastFinishedPulling="2026-01-28 13:04:39.076568396 +0000 UTC m=+1105.988785424" observedRunningTime="2026-01-28 13:04:41.474691004 +0000 UTC m=+1108.386908042" watchObservedRunningTime="2026-01-28 13:04:41.478829629 +0000 UTC m=+1108.391046667" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.485057 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.506095 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" podStartSLOduration=2.919857884 podStartE2EDuration="31.506061778s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" firstStartedPulling="2026-01-28 13:04:12.342796357 +0000 UTC m=+1079.255013395" lastFinishedPulling="2026-01-28 13:04:40.929000251 +0000 UTC m=+1107.841217289" observedRunningTime="2026-01-28 13:04:41.49777385 +0000 UTC m=+1108.409990898" watchObservedRunningTime="2026-01-28 13:04:41.506061778 +0000 UTC m=+1108.418278816" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.535430 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" podStartSLOduration=2.729854662 podStartE2EDuration="31.535398714s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" firstStartedPulling="2026-01-28 13:04:12.110742969 +0000 UTC m=+1079.022960007" lastFinishedPulling="2026-01-28 13:04:40.916287021 +0000 UTC m=+1107.828504059" observedRunningTime="2026-01-28 13:04:41.534599812 +0000 UTC m=+1108.446816850" watchObservedRunningTime="2026-01-28 13:04:41.535398714 +0000 UTC m=+1108.447615762" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.561899 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" podStartSLOduration=4.513288795 podStartE2EDuration="31.561875723s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" 
firstStartedPulling="2026-01-28 13:04:12.028541644 +0000 UTC m=+1078.940758682" lastFinishedPulling="2026-01-28 13:04:39.077128572 +0000 UTC m=+1105.989345610" observedRunningTime="2026-01-28 13:04:41.559731353 +0000 UTC m=+1108.471948391" watchObservedRunningTime="2026-01-28 13:04:41.561875723 +0000 UTC m=+1108.474092761" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.637863 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" podStartSLOduration=5.423129878 podStartE2EDuration="32.637834531s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.861804761 +0000 UTC m=+1078.774021799" lastFinishedPulling="2026-01-28 13:04:39.076509414 +0000 UTC m=+1105.988726452" observedRunningTime="2026-01-28 13:04:41.591809875 +0000 UTC m=+1108.504026913" watchObservedRunningTime="2026-01-28 13:04:41.637834531 +0000 UTC m=+1108.550051559" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.640509 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" podStartSLOduration=4.760344013 podStartE2EDuration="32.640502164s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.197468355 +0000 UTC m=+1078.109685393" lastFinishedPulling="2026-01-28 13:04:39.077626506 +0000 UTC m=+1105.989843544" observedRunningTime="2026-01-28 13:04:41.640073722 +0000 UTC m=+1108.552290760" watchObservedRunningTime="2026-01-28 13:04:41.640502164 +0000 UTC m=+1108.552719202" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.672783 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" podStartSLOduration=5.24506552 podStartE2EDuration="32.672753931s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.64905711 +0000 UTC m=+1078.561274148" lastFinishedPulling="2026-01-28 13:04:39.076745521 +0000 UTC m=+1105.988962559" observedRunningTime="2026-01-28 13:04:41.667231349 +0000 UTC m=+1108.579448407" watchObservedRunningTime="2026-01-28 13:04:41.672753931 +0000 UTC m=+1108.584970969" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.916991 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:41 crc kubenswrapper[4848]: I0128 13:04:41.923484 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe2e05c6-72db-4981-8b56-dc2a620003f2-cert\") pod \"infra-operator-controller-manager-694cf4f878-gcj9g\" (UID: \"fe2e05c6-72db-4981-8b56-dc2a620003f2\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.127667 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-dxgrn" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.136137 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.495103 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g"] Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.516563 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" event={"ID":"d20ac3bf-9cba-4074-962c-7ad7d7b17174","Type":"ContainerStarted","Data":"923c315f66b000fb554c25b7d3f8a4ffb890713c8ac16b1c191eb7aeb797af99"} Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.520025 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.528202 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.530080 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" event={"ID":"0a7152e1-cedd-465b-a186-9a241ca98141","Type":"ContainerStarted","Data":"a157405e8de26435b86ea0df642f12b665a088eef8737c18a7783e5a4f8ae4a2"} Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.530741 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.560650 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" event={"ID":"92cbecbc-09b7-4aa7-8511-dcc241d6b957","Type":"ContainerStarted","Data":"e10453272cbc39338c30af5ca951c22ef0fc00b44fe28777f0088ab0c2c11a19"} Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.571016 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" podStartSLOduration=5.914019915 podStartE2EDuration="33.570976708s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.41950353 +0000 UTC m=+1078.331720568" lastFinishedPulling="2026-01-28 13:04:39.076460323 +0000 UTC m=+1105.988677361" observedRunningTime="2026-01-28 13:04:42.555108662 +0000 UTC m=+1109.467325700" watchObservedRunningTime="2026-01-28 13:04:42.570976708 +0000 UTC m=+1109.483193746" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.575590 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" event={"ID":"39a4178e-2251-4cc9-bc57-2b46a5902a3d","Type":"ContainerStarted","Data":"89dca71358375870d3caa9ddb6da794c476f9b3de0065745b03e255cbe77760b"} Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.575757 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.586238 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" event={"ID":"04f3b1d4-2f58-42d7-962c-d7a940b93469","Type":"ContainerStarted","Data":"a0ef5bb711cce4de8d4b079fe1cb5eeef12a26f9fec9b412857524c1b6472922"} Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.596322 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/390dea01-5c38-4c87-98c2-32f655af4a62-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6\" (UID: \"390dea01-5c38-4c87-98c2-32f655af4a62\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.607751 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" podStartSLOduration=6.061048678 podStartE2EDuration="33.607725619s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.531413757 +0000 UTC m=+1078.443630795" lastFinishedPulling="2026-01-28 13:04:39.078090698 +0000 UTC m=+1105.990307736" observedRunningTime="2026-01-28 13:04:42.602088114 +0000 UTC m=+1109.514305152" watchObservedRunningTime="2026-01-28 13:04:42.607725619 +0000 UTC m=+1109.519942657" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.651668 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" podStartSLOduration=6.266628451 podStartE2EDuration="33.651632616s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.691459668 +0000 UTC m=+1078.603676706" lastFinishedPulling="2026-01-28 13:04:39.076463843 +0000 UTC m=+1105.988680871" observedRunningTime="2026-01-28 13:04:42.646850375 +0000 UTC m=+1109.559067413" watchObservedRunningTime="2026-01-28 13:04:42.651632616 +0000 UTC m=+1109.563849654" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.671297 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-8mp86" podStartSLOduration=4.029388124 podStartE2EDuration="32.671271786s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" firstStartedPulling="2026-01-28 13:04:12.272880058 +0000 UTC m=+1079.185097096" lastFinishedPulling="2026-01-28 13:04:40.91476372 +0000 UTC m=+1107.826980758" observedRunningTime="2026-01-28 13:04:42.669471196 +0000 UTC m=+1109.581688224" watchObservedRunningTime="2026-01-28 13:04:42.671271786 +0000 UTC m=+1109.583488824" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.751528 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-9frbq" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.759758 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.939256 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.939677 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.945026 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-metrics-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:42 crc kubenswrapper[4848]: I0128 13:04:42.948766 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef39eedb-8ccb-47f4-af2c-faee2565e2c9-webhook-certs\") pod \"openstack-operator-controller-manager-6b67879f4f-c5rbp\" (UID: \"ef39eedb-8ccb-47f4-af2c-faee2565e2c9\") " pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:43 crc kubenswrapper[4848]: I0128 13:04:43.034606 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-z6n5j" Jan 28 13:04:43 crc kubenswrapper[4848]: I0128 13:04:43.043330 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:43 crc kubenswrapper[4848]: I0128 13:04:43.425780 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6"] Jan 28 13:04:43 crc kubenswrapper[4848]: W0128 13:04:43.476663 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod390dea01_5c38_4c87_98c2_32f655af4a62.slice/crio-023b0fccd4dd1ca9cb4b0a4bf0a5318e573c7ba4a5f2f4067fd29aaf12f1592f WatchSource:0}: Error finding container 023b0fccd4dd1ca9cb4b0a4bf0a5318e573c7ba4a5f2f4067fd29aaf12f1592f: Status 404 returned error can't find the container with id 023b0fccd4dd1ca9cb4b0a4bf0a5318e573c7ba4a5f2f4067fd29aaf12f1592f Jan 28 13:04:43 crc kubenswrapper[4848]: I0128 13:04:43.607598 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" event={"ID":"fe2e05c6-72db-4981-8b56-dc2a620003f2","Type":"ContainerStarted","Data":"42fbb38809bf81dc36fac66a45889dae3859fd46c216e8b4f8fa5e0e740cfb50"} Jan 28 13:04:43 crc kubenswrapper[4848]: I0128 13:04:43.613398 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" event={"ID":"390dea01-5c38-4c87-98c2-32f655af4a62","Type":"ContainerStarted","Data":"023b0fccd4dd1ca9cb4b0a4bf0a5318e573c7ba4a5f2f4067fd29aaf12f1592f"} Jan 28 13:04:43 crc kubenswrapper[4848]: I0128 13:04:43.728271 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp"] Jan 28 13:04:44 crc kubenswrapper[4848]: I0128 13:04:44.623076 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" event={"ID":"ef39eedb-8ccb-47f4-af2c-faee2565e2c9","Type":"ContainerStarted","Data":"f9e27635c68e1f98d8cb41d15a4498a9c9b3e915613b1615164d4486e99cb7e0"} Jan 28 13:04:44 crc kubenswrapper[4848]: I0128 13:04:44.623503 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" event={"ID":"ef39eedb-8ccb-47f4-af2c-faee2565e2c9","Type":"ContainerStarted","Data":"6ce26bf62780081ffc0f9a1551f8533c4c0d50c60de96a5240fc347137dbfc58"} Jan 28 13:04:44 crc kubenswrapper[4848]: I0128 13:04:44.624515 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:44 crc kubenswrapper[4848]: I0128 13:04:44.658345 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" podStartSLOduration=34.658305751 podStartE2EDuration="34.658305751s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:04:44.651583316 +0000 UTC m=+1111.563800364" watchObservedRunningTime="2026-01-28 13:04:44.658305751 +0000 UTC m=+1111.570522809" Jan 28 13:04:46 crc kubenswrapper[4848]: I0128 13:04:46.642412 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" 
event={"ID":"390dea01-5c38-4c87-98c2-32f655af4a62","Type":"ContainerStarted","Data":"94689f71d8e9ee19b02663d95a3d63052828b519ffa2f7f452be625940d936cb"} Jan 28 13:04:46 crc kubenswrapper[4848]: I0128 13:04:46.643102 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:46 crc kubenswrapper[4848]: I0128 13:04:46.644752 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" event={"ID":"fe2e05c6-72db-4981-8b56-dc2a620003f2","Type":"ContainerStarted","Data":"de1f86c332b608e1fe217b7cab6c51b09d5c19a77097c1ae48d3436707f7e150"} Jan 28 13:04:46 crc kubenswrapper[4848]: I0128 13:04:46.644946 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:46 crc kubenswrapper[4848]: I0128 13:04:46.686233 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" podStartSLOduration=35.005651196 podStartE2EDuration="37.68621042s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:43.497486913 +0000 UTC m=+1110.409703951" lastFinishedPulling="2026-01-28 13:04:46.178046127 +0000 UTC m=+1113.090263175" observedRunningTime="2026-01-28 13:04:46.678853428 +0000 UTC m=+1113.591070476" watchObservedRunningTime="2026-01-28 13:04:46.68621042 +0000 UTC m=+1113.598427458" Jan 28 13:04:46 crc kubenswrapper[4848]: I0128 13:04:46.718866 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" podStartSLOduration=34.05187193 podStartE2EDuration="37.718843447s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:42.507622596 +0000 UTC m=+1109.419839634" lastFinishedPulling="2026-01-28 13:04:46.174594093 +0000 UTC m=+1113.086811151" observedRunningTime="2026-01-28 13:04:46.709944472 +0000 UTC m=+1113.622161510" watchObservedRunningTime="2026-01-28 13:04:46.718843447 +0000 UTC m=+1113.631060485" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.073024 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-tx7mn" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.169814 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-9jqlp" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.214087 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-hfnz7" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.324673 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g54sg" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.346957 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.602663 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-v6mn8" Jan 
28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.680284 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" event={"ID":"f41ee80c-1ab9-4786-8fec-d7b3a12d545b","Type":"ContainerStarted","Data":"680761c48c60d3e0cab624e10b6f2e6d851348f3e3a8279295dbb31587362c29"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.680526 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.682017 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-mckcj" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.682923 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" event={"ID":"b29a79e7-07da-4c52-9798-e279092c28df","Type":"ContainerStarted","Data":"1d4d90b9b9eef2c7141d454551b51e1e38756f44e86e2d7ea3280f25e7237c48"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.683114 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.686118 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" event={"ID":"365e9359-c6e2-428c-8889-95a232bb3e34","Type":"ContainerStarted","Data":"98cafd07c8c603db95938a519701f994800f71d76fc9a0287d17ae096b8e500d"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.686595 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.688521 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" event={"ID":"34fd263e-f69d-4cc8-a003-ccb6f12273a6","Type":"ContainerStarted","Data":"f1bb7992a9c82e2c8be1d6dfcd4e61ac4c231190dcff735758c66db09b641a47"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.688790 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.690745 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" event={"ID":"0593b76f-9225-457e-9c0f-186dc73f37a3","Type":"ContainerStarted","Data":"bbc9a55e1b6cb2ecd4d1b912370a7cd8bdc19b67a02acff3284f8de4de6134cc"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.691075 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.691926 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" event={"ID":"4747f67c-5dd8-415a-8ff5-c6b43e1142cf","Type":"ContainerStarted","Data":"80cfdef14d8115671ce31463130c506768e12e2a37f080a65c19f6aeb1344953"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.692111 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.694574 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" event={"ID":"164ef38a-92cd-4442-8925-509ba68366ba","Type":"ContainerStarted","Data":"89bb0eb96811319c82b21396cd2b40a54c6327af84f046a1a911f8be9d5f8d1b"} Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.695905 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.710997 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" podStartSLOduration=3.389127616 podStartE2EDuration="41.710965121s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.112013672 +0000 UTC m=+1078.024230700" lastFinishedPulling="2026-01-28 13:04:49.433851167 +0000 UTC m=+1116.346068205" observedRunningTime="2026-01-28 13:04:50.700091073 +0000 UTC m=+1117.612308141" watchObservedRunningTime="2026-01-28 13:04:50.710965121 +0000 UTC m=+1117.623182159" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.727426 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" podStartSLOduration=2.6145299680000003 podStartE2EDuration="40.727397123s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" firstStartedPulling="2026-01-28 13:04:12.182740206 +0000 UTC m=+1079.094957244" lastFinishedPulling="2026-01-28 13:04:50.295607351 +0000 UTC m=+1117.207824399" observedRunningTime="2026-01-28 13:04:50.723863036 +0000 UTC m=+1117.636080074" watchObservedRunningTime="2026-01-28 13:04:50.727397123 +0000 UTC m=+1117.639614161" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.754711 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" podStartSLOduration=4.213580932 podStartE2EDuration="41.754683903s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.893207318 +0000 UTC m=+1078.805424356" lastFinishedPulling="2026-01-28 13:04:49.434310279 +0000 UTC m=+1116.346527327" observedRunningTime="2026-01-28 13:04:50.746643572 +0000 UTC m=+1117.658860610" watchObservedRunningTime="2026-01-28 13:04:50.754683903 +0000 UTC m=+1117.666900941" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.774555 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" podStartSLOduration=3.399008413 podStartE2EDuration="41.774531539s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:10.921630342 +0000 UTC m=+1077.833847380" lastFinishedPulling="2026-01-28 13:04:49.297153468 +0000 UTC m=+1116.209370506" observedRunningTime="2026-01-28 13:04:50.769465239 +0000 UTC m=+1117.681682297" watchObservedRunningTime="2026-01-28 13:04:50.774531539 +0000 UTC m=+1117.686748577" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.799903 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" podStartSLOduration=3.313161948 
podStartE2EDuration="41.799883686s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:10.946137031 +0000 UTC m=+1077.858354069" lastFinishedPulling="2026-01-28 13:04:49.432858759 +0000 UTC m=+1116.345075807" observedRunningTime="2026-01-28 13:04:50.796837042 +0000 UTC m=+1117.709054080" watchObservedRunningTime="2026-01-28 13:04:50.799883686 +0000 UTC m=+1117.712100714" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.830817 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" podStartSLOduration=4.269622168 podStartE2EDuration="41.830797836s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.871623949 +0000 UTC m=+1078.783840987" lastFinishedPulling="2026-01-28 13:04:49.432799617 +0000 UTC m=+1116.345016655" observedRunningTime="2026-01-28 13:04:50.826703064 +0000 UTC m=+1117.738920122" watchObservedRunningTime="2026-01-28 13:04:50.830797836 +0000 UTC m=+1117.743014864" Jan 28 13:04:50 crc kubenswrapper[4848]: I0128 13:04:50.933793 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" podStartSLOduration=3.333982123 podStartE2EDuration="40.933774307s" podCreationTimestamp="2026-01-28 13:04:10 +0000 UTC" firstStartedPulling="2026-01-28 13:04:12.323398757 +0000 UTC m=+1079.235615795" lastFinishedPulling="2026-01-28 13:04:49.923190941 +0000 UTC m=+1116.835407979" observedRunningTime="2026-01-28 13:04:50.875421823 +0000 UTC m=+1117.787638861" watchObservedRunningTime="2026-01-28 13:04:50.933774307 +0000 UTC m=+1117.845991345" Jan 28 13:04:51 crc kubenswrapper[4848]: I0128 13:04:51.033317 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-csw7g" Jan 28 13:04:51 crc kubenswrapper[4848]: I0128 13:04:51.100628 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-2g2qj" Jan 28 13:04:51 crc kubenswrapper[4848]: I0128 13:04:51.160541 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-cxnsf" Jan 28 13:04:51 crc kubenswrapper[4848]: I0128 13:04:51.217642 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-5h8th" Jan 28 13:04:52 crc kubenswrapper[4848]: I0128 13:04:52.142276 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-gcj9g" Jan 28 13:04:52 crc kubenswrapper[4848]: I0128 13:04:52.715035 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" event={"ID":"82ac0cb8-c28c-4242-8aa5-817aaf35ea3e","Type":"ContainerStarted","Data":"3d0408e7267e2cdb4010dc2943a3c8bb54fb4c76e5111d17a4fd0237eb7e9f66"} Jan 28 13:04:52 crc kubenswrapper[4848]: I0128 13:04:52.715771 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:04:52 crc kubenswrapper[4848]: I0128 13:04:52.738527 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" podStartSLOduration=3.933467371 podStartE2EDuration="43.73849981s" podCreationTimestamp="2026-01-28 13:04:09 +0000 UTC" firstStartedPulling="2026-01-28 13:04:11.78819203 +0000 UTC m=+1078.700409068" lastFinishedPulling="2026-01-28 13:04:51.593224469 +0000 UTC m=+1118.505441507" observedRunningTime="2026-01-28 13:04:52.733970696 +0000 UTC m=+1119.646187744" watchObservedRunningTime="2026-01-28 13:04:52.73849981 +0000 UTC m=+1119.650716848" Jan 28 13:04:52 crc kubenswrapper[4848]: I0128 13:04:52.767661 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6" Jan 28 13:04:53 crc kubenswrapper[4848]: I0128 13:04:53.053318 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6b67879f4f-c5rbp" Jan 28 13:04:59 crc kubenswrapper[4848]: I0128 13:04:59.902018 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-cjt92" Jan 28 13:04:59 crc kubenswrapper[4848]: I0128 13:04:59.926341 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-dj8qm" Jan 28 13:04:59 crc kubenswrapper[4848]: I0128 13:04:59.969889 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-9nvdh" Jan 28 13:05:00 crc kubenswrapper[4848]: I0128 13:05:00.563124 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-xb97k" Jan 28 13:05:00 crc kubenswrapper[4848]: I0128 13:05:00.684480 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-qpthc" Jan 28 13:05:00 crc kubenswrapper[4848]: I0128 13:05:00.715326 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-jwvlh" Jan 28 13:05:01 crc kubenswrapper[4848]: I0128 13:05:01.134157 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-s8mg8" Jan 28 13:05:01 crc kubenswrapper[4848]: I0128 13:05:01.222544 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-59c5775db7-r2ppl" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.874029 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57b9d58665-cgh5r"] Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.876196 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.883686 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.883883 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.883996 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.884162 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-fmq9c" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.957619 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-config\") pod \"dnsmasq-dns-57b9d58665-cgh5r\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.957727 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lv6h\" (UniqueName: \"kubernetes.io/projected/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-kube-api-access-9lv6h\") pod \"dnsmasq-dns-57b9d58665-cgh5r\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:19 crc kubenswrapper[4848]: I0128 13:05:19.963332 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57b9d58665-cgh5r"] Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.010599 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bb9bf987-6j72h"] Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.012267 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.027407 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.036364 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb9bf987-6j72h"] Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.062237 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-config\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.064925 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9rxp\" (UniqueName: \"kubernetes.io/projected/ed831fad-154d-46e3-b8fa-67dd41030c18-kube-api-access-j9rxp\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.065086 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-config\") pod \"dnsmasq-dns-57b9d58665-cgh5r\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.065175 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lv6h\" (UniqueName: \"kubernetes.io/projected/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-kube-api-access-9lv6h\") pod \"dnsmasq-dns-57b9d58665-cgh5r\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.065286 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-dns-svc\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.066537 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-config\") pod \"dnsmasq-dns-57b9d58665-cgh5r\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.112790 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lv6h\" (UniqueName: \"kubernetes.io/projected/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-kube-api-access-9lv6h\") pod \"dnsmasq-dns-57b9d58665-cgh5r\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.168177 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-dns-svc\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 
13:05:20.168289 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-config\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.168319 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9rxp\" (UniqueName: \"kubernetes.io/projected/ed831fad-154d-46e3-b8fa-67dd41030c18-kube-api-access-j9rxp\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.169520 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-dns-svc\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.170125 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-config\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.196226 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.215642 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9rxp\" (UniqueName: \"kubernetes.io/projected/ed831fad-154d-46e3-b8fa-67dd41030c18-kube-api-access-j9rxp\") pod \"dnsmasq-dns-7bb9bf987-6j72h\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.347870 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.704104 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57b9d58665-cgh5r"] Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.866430 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb9bf987-6j72h"] Jan 28 13:05:20 crc kubenswrapper[4848]: W0128 13:05:20.876210 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded831fad_154d_46e3_b8fa_67dd41030c18.slice/crio-7f210f1741ffcc37943d64ae136f58e3d0b2c65899612f9e555799daed60b858 WatchSource:0}: Error finding container 7f210f1741ffcc37943d64ae136f58e3d0b2c65899612f9e555799daed60b858: Status 404 returned error can't find the container with id 7f210f1741ffcc37943d64ae136f58e3d0b2c65899612f9e555799daed60b858 Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.955975 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" event={"ID":"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2","Type":"ContainerStarted","Data":"69f78a25f0b078d517b84c88e5b1fc8e3070d5ff4dc0aa9e92b8649ab10f55c1"} Jan 28 13:05:20 crc kubenswrapper[4848]: I0128 13:05:20.957977 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" event={"ID":"ed831fad-154d-46e3-b8fa-67dd41030c18","Type":"ContainerStarted","Data":"7f210f1741ffcc37943d64ae136f58e3d0b2c65899612f9e555799daed60b858"} Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.700750 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57b9d58665-cgh5r"] Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.766996 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f7c4fd555-m2fgj"] Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.768398 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.820509 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f7c4fd555-m2fgj"] Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.850636 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-dns-svc\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.850718 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2jtc\" (UniqueName: \"kubernetes.io/projected/4d2a4c68-7925-4e6b-befe-032010898a54-kube-api-access-b2jtc\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.850775 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-config\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.952632 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-dns-svc\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.952736 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2jtc\" (UniqueName: \"kubernetes.io/projected/4d2a4c68-7925-4e6b-befe-032010898a54-kube-api-access-b2jtc\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.952819 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-config\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.953825 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-config\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:23 crc kubenswrapper[4848]: I0128 13:05:23.954349 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-dns-svc\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.030580 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2jtc\" (UniqueName: 
\"kubernetes.io/projected/4d2a4c68-7925-4e6b-befe-032010898a54-kube-api-access-b2jtc\") pod \"dnsmasq-dns-7f7c4fd555-m2fgj\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.104921 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.249290 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb9bf987-6j72h"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.257159 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bf86d7f99-lkd9s"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.258631 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.263274 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf86d7f99-lkd9s"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.460935 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46zr7\" (UniqueName: \"kubernetes.io/projected/675a7fe6-6f96-4af1-9881-6be45e190425-kube-api-access-46zr7\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.461033 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-dns-svc\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.461095 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-config\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.562637 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46zr7\" (UniqueName: \"kubernetes.io/projected/675a7fe6-6f96-4af1-9881-6be45e190425-kube-api-access-46zr7\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.563208 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-dns-svc\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.563344 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-config\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.564562 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-config\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.571384 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-dns-svc\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.609163 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46zr7\" (UniqueName: \"kubernetes.io/projected/675a7fe6-6f96-4af1-9881-6be45e190425-kube-api-access-46zr7\") pod \"dnsmasq-dns-bf86d7f99-lkd9s\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") " pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.633022 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f7c4fd555-m2fgj"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.652985 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86fbff885-vnwng"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.656549 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.668514 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-dns-svc\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.668579 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-config\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.668610 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttsfb\" (UniqueName: \"kubernetes.io/projected/382bff15-0b1d-495c-be83-5f742696eb21-kube-api-access-ttsfb\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.673509 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86fbff885-vnwng"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.769709 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f7c4fd555-m2fgj"] Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.770970 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-dns-svc\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.771036 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-ttsfb\" (UniqueName: \"kubernetes.io/projected/382bff15-0b1d-495c-be83-5f742696eb21-kube-api-access-ttsfb\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.771077 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-config\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.772060 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-dns-svc\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.773183 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-config\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.801748 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttsfb\" (UniqueName: \"kubernetes.io/projected/382bff15-0b1d-495c-be83-5f742696eb21-kube-api-access-ttsfb\") pod \"dnsmasq-dns-86fbff885-vnwng\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:24 crc kubenswrapper[4848]: I0128 13:05:24.900356 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.004231 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.009165 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.011858 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.016027 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zqzld" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.016397 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.016631 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.016726 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.016909 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.017307 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.021856 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.024009 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192152 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192224 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192290 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9069c6ac-fe99-41c7-8ee1-0154d87e506c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192337 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192373 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192398 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6s4x\" (UniqueName: 
\"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-kube-api-access-q6s4x\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192424 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192447 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192501 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9069c6ac-fe99-41c7-8ee1-0154d87e506c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192525 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.192552 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-config-data\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295294 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9069c6ac-fe99-41c7-8ee1-0154d87e506c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295372 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295409 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295430 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6s4x\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-kube-api-access-q6s4x\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " 
pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295452 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295470 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295511 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9069c6ac-fe99-41c7-8ee1-0154d87e506c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295531 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295575 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-config-data\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295628 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.295647 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.297421 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.297499 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.297712 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.298161 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.300560 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.301609 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.302220 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9069c6ac-fe99-41c7-8ee1-0154d87e506c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.302936 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.309640 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-config-data\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.312941 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9069c6ac-fe99-41c7-8ee1-0154d87e506c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.322040 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.322282 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6s4x\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-kube-api-access-q6s4x\") pod \"rabbitmq-server-0\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.352675 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.408425 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.412296 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.418704 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-lts5k" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.418991 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.419153 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.423225 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.423552 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.423901 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.424124 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.435226 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602507 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602567 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602592 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602627 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6be2776-ada1-4c48-9588-9e488283ee6e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602647 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602670 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6be2776-ada1-4c48-9588-9e488283ee6e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602689 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602710 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602734 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602773 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.602790 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cmpq\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-kube-api-access-8cmpq\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705073 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705132 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705167 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705212 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6be2776-ada1-4c48-9588-9e488283ee6e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705242 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705286 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6be2776-ada1-4c48-9588-9e488283ee6e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705310 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705343 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705373 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705427 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.705451 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cmpq\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-kube-api-access-8cmpq\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.707387 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.708046 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.714034 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.715442 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6be2776-ada1-4c48-9588-9e488283ee6e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.722659 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.722975 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.723235 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.728346 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6be2776-ada1-4c48-9588-9e488283ee6e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.729673 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.730899 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cmpq\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-kube-api-access-8cmpq\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.746752 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.757082 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.803923 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.821077 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.832172 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.843569 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.844222 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.844718 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.846466 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.846459 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.846742 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-wwjwq" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.884418 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.925664 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.925972 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.926149 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.926295 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.927929 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.928020 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.928210 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ff062566-cfd3-4393-b794-695d3473ef1a-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.928276 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4z4n\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-kube-api-access-k4z4n\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.928322 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ff062566-cfd3-4393-b794-695d3473ef1a-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.928373 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:25 crc kubenswrapper[4848]: I0128 13:05:25.928597 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc 
kubenswrapper[4848]: I0128 13:05:26.030735 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.030809 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.030851 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ff062566-cfd3-4393-b794-695d3473ef1a-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.030885 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4z4n\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-kube-api-access-k4z4n\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.030923 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ff062566-cfd3-4393-b794-695d3473ef1a-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.030955 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.030993 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.031034 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.031058 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 
13:05:26.031073 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.031110 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.031149 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.031764 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.031963 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.032991 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.033142 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.034163 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ff062566-cfd3-4393-b794-695d3473ef1a-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.035457 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 
13:05:26.036919 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.037167 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.038693 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ff062566-cfd3-4393-b794-695d3473ef1a-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.039872 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ff062566-cfd3-4393-b794-695d3473ef1a-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.052445 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4z4n\" (UniqueName: \"kubernetes.io/projected/ff062566-cfd3-4393-b794-695d3473ef1a-kube-api-access-k4z4n\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.076895 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"ff062566-cfd3-4393-b794-695d3473ef1a\") " pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.165828 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.821194 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.822977 4848 util.go:30] "No sandbox for pod can be found. 
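Annotation: the entries above walk the kubelet's volume reconciler through rabbitmq-notifications-server-0 in its fixed order. operationExecutor.VerifyControllerAttachedVolume confirms each volume is attached, MountVolume.MountDevice performs the one-time device-level mount (only for volume types that need it, here the local volume staged at /mnt/openstack/pv08), and MountVolume.SetUp finishes the per-pod mount for every volume. Purely node-local types (emptyDir, configMap, secret, projected, downward API) skip the device phase and go straight to SetUp, which matches the timestamps above. A minimal Go sketch of that ordering, with hypothetical types standing in for the kubelet's internal operationExecutor:

    package main

    import "fmt"

    // volume models only what the sketch needs; the names are
    // hypothetical stand-ins for the kubelet's internal volume types.
    type volume struct {
        name             string
        needsDeviceMount bool // true for local/CSI block-backed volumes
    }

    func reconcile(pod string, vols []volume) {
        for _, v := range vols {
            // Phase 0: confirm the controller has attached the volume.
            fmt.Printf("VerifyControllerAttachedVolume %q for %s\n", v.name, pod)
        }
        for _, v := range vols {
            if v.needsDeviceMount {
                // Phase 1: one-time, device-level mount shared across pods.
                fmt.Printf("MountVolume.MountDevice %q\n", v.name)
            }
            // Phase 2: per-pod SetUp (bind mount, or render secrets/configmaps).
            fmt.Printf("MountVolume.SetUp %q\n", v.name)
        }
    }

    func main() {
        reconcile("openstack/rabbitmq-notifications-server-0", []volume{
            {name: "local-storage08-crc", needsDeviceMount: true},
            {name: "server-conf"},
            {name: "rabbitmq-tls"},
        })
    }
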
Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.827571 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.827919 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-t5hwt" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.828239 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.829183 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.842193 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.866133 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957194 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957291 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957327 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-config-data-default\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957359 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l5qb\" (UniqueName: \"kubernetes.io/projected/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-kube-api-access-6l5qb\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957392 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957415 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-kolla-config\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957456 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:26 crc kubenswrapper[4848]: I0128 13:05:26.957488 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060422 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060488 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060521 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060550 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-config-data-default\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060580 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l5qb\" (UniqueName: \"kubernetes.io/projected/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-kube-api-access-6l5qb\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060607 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060636 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-kolla-config\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.060675 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-config-data-generated\") pod \"openstack-galera-0\" (UID: 
\"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.061138 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.062315 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-config-data-default\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.062515 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.063695 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.065011 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-kolla-config\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.077275 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.092657 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.093722 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l5qb\" (UniqueName: \"kubernetes.io/projected/ee209e0b-96f8-46ef-b1ff-2fac23c03ecc-kube-api-access-6l5qb\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.101152 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc\") " pod="openstack/openstack-galera-0" Jan 28 13:05:27 crc kubenswrapper[4848]: I0128 13:05:27.149696 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.169711 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.173734 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.182002 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.183081 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.183117 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-mwlfk" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.183350 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.183405 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281510 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281575 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4gtd\" (UniqueName: \"kubernetes.io/projected/3face43f-5a30-4c86-b004-3a98bb508b55-kube-api-access-j4gtd\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281608 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3face43f-5a30-4c86-b004-3a98bb508b55-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281632 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281658 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281849 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3face43f-5a30-4c86-b004-3a98bb508b55-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281933 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.281982 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3face43f-5a30-4c86-b004-3a98bb508b55-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.385950 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3face43f-5a30-4c86-b004-3a98bb508b55-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386028 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386057 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4gtd\" (UniqueName: \"kubernetes.io/projected/3face43f-5a30-4c86-b004-3a98bb508b55-kube-api-access-j4gtd\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386089 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3face43f-5a30-4c86-b004-3a98bb508b55-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386136 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386163 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386187 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3face43f-5a30-4c86-b004-3a98bb508b55-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: 
\"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.386312 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.387456 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.387681 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.389886 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.389899 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3face43f-5a30-4c86-b004-3a98bb508b55-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.390276 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3face43f-5a30-4c86-b004-3a98bb508b55-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.393398 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3face43f-5a30-4c86-b004-3a98bb508b55-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.396553 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3face43f-5a30-4c86-b004-3a98bb508b55-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.423201 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.428206 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4gtd\" (UniqueName: \"kubernetes.io/projected/3face43f-5a30-4c86-b004-3a98bb508b55-kube-api-access-j4gtd\") pod \"openstack-cell1-galera-0\" (UID: \"3face43f-5a30-4c86-b004-3a98bb508b55\") " pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.492777 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.668682 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.698377 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.698543 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.707911 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-nclv5" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.708173 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.708236 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.795042 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7e5a41be-973a-4b25-991f-ccbdef21b343-kolla-config\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.795434 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e5a41be-973a-4b25-991f-ccbdef21b343-config-data\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.795570 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79kfc\" (UniqueName: \"kubernetes.io/projected/7e5a41be-973a-4b25-991f-ccbdef21b343-kube-api-access-79kfc\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.795690 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e5a41be-973a-4b25-991f-ccbdef21b343-memcached-tls-certs\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.795784 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5a41be-973a-4b25-991f-ccbdef21b343-combined-ca-bundle\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.906619 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/7e5a41be-973a-4b25-991f-ccbdef21b343-kolla-config\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.906689 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e5a41be-973a-4b25-991f-ccbdef21b343-config-data\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.906777 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79kfc\" (UniqueName: \"kubernetes.io/projected/7e5a41be-973a-4b25-991f-ccbdef21b343-kube-api-access-79kfc\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.907770 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7e5a41be-973a-4b25-991f-ccbdef21b343-kolla-config\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.907873 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e5a41be-973a-4b25-991f-ccbdef21b343-config-data\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.909187 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e5a41be-973a-4b25-991f-ccbdef21b343-memcached-tls-certs\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.909289 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5a41be-973a-4b25-991f-ccbdef21b343-combined-ca-bundle\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.917392 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5a41be-973a-4b25-991f-ccbdef21b343-combined-ca-bundle\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.929584 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e5a41be-973a-4b25-991f-ccbdef21b343-memcached-tls-certs\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:28 crc kubenswrapper[4848]: I0128 13:05:28.930593 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79kfc\" (UniqueName: \"kubernetes.io/projected/7e5a41be-973a-4b25-991f-ccbdef21b343-kube-api-access-79kfc\") pod \"memcached-0\" (UID: \"7e5a41be-973a-4b25-991f-ccbdef21b343\") " pod="openstack/memcached-0" Jan 28 13:05:29 crc kubenswrapper[4848]: I0128 13:05:29.023461 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.747717 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.749877 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.753476 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-sk8ht" Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.770352 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.841609 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-td8q9\" (UniqueName: \"kubernetes.io/projected/4d631c7a-117c-4a10-a7f6-28331bc4ae84-kube-api-access-td8q9\") pod \"kube-state-metrics-0\" (UID: \"4d631c7a-117c-4a10-a7f6-28331bc4ae84\") " pod="openstack/kube-state-metrics-0" Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.943188 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-td8q9\" (UniqueName: \"kubernetes.io/projected/4d631c7a-117c-4a10-a7f6-28331bc4ae84-kube-api-access-td8q9\") pod \"kube-state-metrics-0\" (UID: \"4d631c7a-117c-4a10-a7f6-28331bc4ae84\") " pod="openstack/kube-state-metrics-0" Jan 28 13:05:30 crc kubenswrapper[4848]: I0128 13:05:30.981609 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-td8q9\" (UniqueName: \"kubernetes.io/projected/4d631c7a-117c-4a10-a7f6-28331bc4ae84-kube-api-access-td8q9\") pod \"kube-state-metrics-0\" (UID: \"4d631c7a-117c-4a10-a7f6-28331bc4ae84\") " pod="openstack/kube-state-metrics-0" Jan 28 13:05:31 crc kubenswrapper[4848]: I0128 13:05:31.080860 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 13:05:31 crc kubenswrapper[4848]: W0128 13:05:31.400334 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d2a4c68_7925_4e6b_befe_032010898a54.slice/crio-e55671a86832a0824223deb50dcc14bf777d686458628a11e8ee5b33e6145124 WatchSource:0}: Error finding container e55671a86832a0824223deb50dcc14bf777d686458628a11e8ee5b33e6145124: Status 404 returned error can't find the container with id e55671a86832a0824223deb50dcc14bf777d686458628a11e8ee5b33e6145124 Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.089197 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" event={"ID":"4d2a4c68-7925-4e6b-befe-032010898a54","Type":"ContainerStarted","Data":"e55671a86832a0824223deb50dcc14bf777d686458628a11e8ee5b33e6145124"} Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.117709 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.120185 4848 util.go:30] "No sandbox for pod can be found. 
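Annotation: two entries above are worth reading together. The W(arning) from manager.go:1169 is cAdvisor reacting to a cgroup-creation event for container e55671a8... before CRI-O can report the container, so the lookup returns 404; one second later the PLEG relist emits ContainerStarted for the same ID, confirming the race was benign. At its core the PLEG turns set differences between successive relists into pod lifecycle events; a toy Go sketch of that comparison (a hypothetical stand-in for the kubelet's generic PLEG, not its actual implementation):

    package main

    import "fmt"

    // diff reports container IDs that appeared since the previous
    // relist -- the essence of a "ContainerStarted" event.
    func diff(prev, cur map[string]bool) []string {
        var started []string
        for id := range cur {
            if !prev[id] {
                started = append(started, id)
            }
        }
        return started
    }

    func main() {
        prev := map[string]bool{}
        cur := map[string]bool{
            // ID truncated from the log entry above.
            "e55671a86832a082...": true,
        }
        for _, id := range diff(prev, cur) {
            fmt.Printf("SyncLoop (PLEG): ContainerStarted %s\n", id)
        }
    }
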
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.123580 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.123776 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.124326 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-sll79" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.124487 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.124613 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.124797 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.126954 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.132339 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.143718 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163315 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163379 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/481d4e0f-f65b-466a-8e9d-f6761e78479f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163417 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163573 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r6lc\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-kube-api-access-9r6lc\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163654 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163739 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163773 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163806 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163833 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-config\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.163852 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265508 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265579 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265608 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-web-config\") pod 
\"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265637 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-config\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265658 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265689 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265713 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/481d4e0f-f65b-466a-8e9d-f6761e78479f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265941 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.265994 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r6lc\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-kube-api-access-9r6lc\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.266033 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.268113 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.270326 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.270464 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.271266 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.272057 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-config\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.272325 4848 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.272448 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fbb6ff8d2ceb994243fa9499d5bbb9ac1ad8a88e4c49c99f41a1170dfb512188/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.284006 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.287081 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/481d4e0f-f65b-466a-8e9d-f6761e78479f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.289680 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.290010 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r6lc\" (UniqueName: 
\"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-kube-api-access-9r6lc\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.319693 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:32 crc kubenswrapper[4848]: I0128 13:05:32.448006 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.876090 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-p6z9h"] Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.879149 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.884903 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-lsngk" Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.884971 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.891625 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.961718 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-p6z9h"] Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.975181 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-59mkx"] Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.977367 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:34 crc kubenswrapper[4848]: I0128 13:05:34.996965 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-59mkx"] Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027664 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e3e961-2cae-4bee-b73a-40336940b35c-scripts\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027728 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-run-ovn\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027753 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e3e961-2cae-4bee-b73a-40336940b35c-ovn-controller-tls-certs\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027797 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5xhv\" (UniqueName: \"kubernetes.io/projected/77e3e961-2cae-4bee-b73a-40336940b35c-kube-api-access-f5xhv\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027819 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e3e961-2cae-4bee-b73a-40336940b35c-combined-ca-bundle\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027852 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-log-ovn\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.027966 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-run\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.129764 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e3e961-2cae-4bee-b73a-40336940b35c-scripts\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.129816 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-run-ovn\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.129844 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e3e961-2cae-4bee-b73a-40336940b35c-ovn-controller-tls-certs\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.129866 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49e7ea17-ef5b-4403-ad09-3553928c90e3-scripts\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.129887 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wsmr\" (UniqueName: \"kubernetes.io/projected/49e7ea17-ef5b-4403-ad09-3553928c90e3-kube-api-access-6wsmr\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.129911 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5xhv\" (UniqueName: \"kubernetes.io/projected/77e3e961-2cae-4bee-b73a-40336940b35c-kube-api-access-f5xhv\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130181 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e3e961-2cae-4bee-b73a-40336940b35c-combined-ca-bundle\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130610 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-etc-ovs\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130649 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-log-ovn\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130678 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-log\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130780 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-run\") pod 
\"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130843 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-lib\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.130902 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-run\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.131115 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-log-ovn\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.131262 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-run\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.131397 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/77e3e961-2cae-4bee-b73a-40336940b35c-var-run-ovn\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.133370 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e3e961-2cae-4bee-b73a-40336940b35c-scripts\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.137038 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e3e961-2cae-4bee-b73a-40336940b35c-ovn-controller-tls-certs\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.142835 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e3e961-2cae-4bee-b73a-40336940b35c-combined-ca-bundle\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.149046 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5xhv\" (UniqueName: \"kubernetes.io/projected/77e3e961-2cae-4bee-b73a-40336940b35c-kube-api-access-f5xhv\") pod \"ovn-controller-p6z9h\" (UID: \"77e3e961-2cae-4bee-b73a-40336940b35c\") " pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.205879 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.232716 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-etc-ovs\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.232771 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-log\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.232817 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-run\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.232849 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-lib\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.232942 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49e7ea17-ef5b-4403-ad09-3553928c90e3-scripts\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.232974 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wsmr\" (UniqueName: \"kubernetes.io/projected/49e7ea17-ef5b-4403-ad09-3553928c90e3-kube-api-access-6wsmr\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.233191 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-log\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.233312 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-etc-ovs\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.233400 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-lib\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.233416 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/49e7ea17-ef5b-4403-ad09-3553928c90e3-var-run\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.235895 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49e7ea17-ef5b-4403-ad09-3553928c90e3-scripts\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.253933 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wsmr\" (UniqueName: \"kubernetes.io/projected/49e7ea17-ef5b-4403-ad09-3553928c90e3-kube-api-access-6wsmr\") pod \"ovn-controller-ovs-59mkx\" (UID: \"49e7ea17-ef5b-4403-ad09-3553928c90e3\") " pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.310873 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.384234 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.385733 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.388913 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.389197 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.389289 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.389454 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.389780 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-ffvfw" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.408400 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540100 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59144d8e-c7a9-442f-bcc3-585322a77a97-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540695 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540729 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " 
pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540753 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540783 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540819 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59144d8e-c7a9-442f-bcc3-585322a77a97-config\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540854 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59144d8e-c7a9-442f-bcc3-585322a77a97-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.540886 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdk6b\" (UniqueName: \"kubernetes.io/projected/59144d8e-c7a9-442f-bcc3-585322a77a97-kube-api-access-xdk6b\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.642724 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59144d8e-c7a9-442f-bcc3-585322a77a97-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.642896 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.642959 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.642980 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.643041 4848 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.643107 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59144d8e-c7a9-442f-bcc3-585322a77a97-config\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.643140 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59144d8e-c7a9-442f-bcc3-585322a77a97-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.643197 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdk6b\" (UniqueName: \"kubernetes.io/projected/59144d8e-c7a9-442f-bcc3-585322a77a97-kube-api-access-xdk6b\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.644603 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59144d8e-c7a9-442f-bcc3-585322a77a97-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.645223 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59144d8e-c7a9-442f-bcc3-585322a77a97-config\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.645581 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.646790 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59144d8e-c7a9-442f-bcc3-585322a77a97-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.651222 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.653698 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 
13:05:35.654798 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59144d8e-c7a9-442f-bcc3-585322a77a97-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.668555 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdk6b\" (UniqueName: \"kubernetes.io/projected/59144d8e-c7a9-442f-bcc3-585322a77a97-kube-api-access-xdk6b\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.723279 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"59144d8e-c7a9-442f-bcc3-585322a77a97\") " pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:35 crc kubenswrapper[4848]: I0128 13:05:35.849012 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 13:05:36 crc kubenswrapper[4848]: I0128 13:05:36.010686 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 13:05:37 crc kubenswrapper[4848]: I0128 13:05:37.985066 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 13:05:37 crc kubenswrapper[4848]: I0128 13:05:37.986937 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:37 crc kubenswrapper[4848]: I0128 13:05:37.989759 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-blzsk" Jan 28 13:05:37 crc kubenswrapper[4848]: I0128 13:05:37.990325 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 28 13:05:37 crc kubenswrapper[4848]: I0128 13:05:37.991080 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 28 13:05:37 crc kubenswrapper[4848]: I0128 13:05:37.991144 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.013623 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028231 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cd06a1-9204-4a3f-bb28-9227a8023af9-config\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028365 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028586 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hz22\" (UniqueName: \"kubernetes.io/projected/38cd06a1-9204-4a3f-bb28-9227a8023af9-kube-api-access-8hz22\") pod 
\"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028624 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/38cd06a1-9204-4a3f-bb28-9227a8023af9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028679 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028823 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.028923 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/38cd06a1-9204-4a3f-bb28-9227a8023af9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.029000 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131402 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cd06a1-9204-4a3f-bb28-9227a8023af9-config\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131469 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131575 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hz22\" (UniqueName: \"kubernetes.io/projected/38cd06a1-9204-4a3f-bb28-9227a8023af9-kube-api-access-8hz22\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131601 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/38cd06a1-9204-4a3f-bb28-9227a8023af9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131627 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131673 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131705 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/38cd06a1-9204-4a3f-bb28-9227a8023af9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131733 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.131969 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.132031 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/38cd06a1-9204-4a3f-bb28-9227a8023af9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.132352 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cd06a1-9204-4a3f-bb28-9227a8023af9-config\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.133080 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/38cd06a1-9204-4a3f-bb28-9227a8023af9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.138237 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.138278 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " 
pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.138588 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/38cd06a1-9204-4a3f-bb28-9227a8023af9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.149807 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hz22\" (UniqueName: \"kubernetes.io/projected/38cd06a1-9204-4a3f-bb28-9227a8023af9-kube-api-access-8hz22\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.159718 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"38cd06a1-9204-4a3f-bb28-9227a8023af9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:38 crc kubenswrapper[4848]: I0128 13:05:38.321648 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 13:05:41 crc kubenswrapper[4848]: W0128 13:05:41.359537 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e5a41be_973a_4b25_991f_ccbdef21b343.slice/crio-06ca90418060fe4a07f959ca8885669772d953fb0ee6cd832e81ae4e9277e4f4 WatchSource:0}: Error finding container 06ca90418060fe4a07f959ca8885669772d953fb0ee6cd832e81ae4e9277e4f4: Status 404 returned error can't find the container with id 06ca90418060fe4a07f959ca8885669772d953fb0ee6cd832e81ae4e9277e4f4 Jan 28 13:05:41 crc kubenswrapper[4848]: I0128 13:05:41.860499 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:05:42 crc kubenswrapper[4848]: I0128 13:05:42.180201 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"7e5a41be-973a-4b25-991f-ccbdef21b343","Type":"ContainerStarted","Data":"06ca90418060fe4a07f959ca8885669772d953fb0ee6cd832e81ae4e9277e4f4"} Jan 28 13:05:42 crc kubenswrapper[4848]: W0128 13:05:42.341690 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d631c7a_117c_4a10_a7f6_28331bc4ae84.slice/crio-b70e86e58bd3f4ffd5fc1ef25896c04958fad3c6add1d6b0fdddace71ff66be8 WatchSource:0}: Error finding container b70e86e58bd3f4ffd5fc1ef25896c04958fad3c6add1d6b0fdddace71ff66be8: Status 404 returned error can't find the container with id b70e86e58bd3f4ffd5fc1ef25896c04958fad3c6add1d6b0fdddace71ff66be8 Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.353674 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.353731 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.353892 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:38.102.83.20:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9lv6h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57b9d58665-cgh5r_openstack(7d6b464f-beb3-4c1c-a1f8-473e806ea1c2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.355126 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" podUID="7d6b464f-beb3-4c1c-a1f8-473e806ea1c2" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.404935 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.405018 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.405189 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.20:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv 
--bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j9rxp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bb9bf987-6j72h_openstack(ed831fad-154d-46e3-b8fa-67dd41030c18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:05:42 crc kubenswrapper[4848]: E0128 13:05:42.406652 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" podUID="ed831fad-154d-46e3-b8fa-67dd41030c18" Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.095817 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf86d7f99-lkd9s"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.200893 4848 generic.go:334] "Generic (PLEG): container finished" podID="4d2a4c68-7925-4e6b-befe-032010898a54" containerID="4ec3dbae46d8b3faf3a72acfa41fb4c5b8b4a77d1aab01879ed90a1520fbc6ef" exitCode=0 Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.200976 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" event={"ID":"4d2a4c68-7925-4e6b-befe-032010898a54","Type":"ContainerDied","Data":"4ec3dbae46d8b3faf3a72acfa41fb4c5b8b4a77d1aab01879ed90a1520fbc6ef"} Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.206174 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4d631c7a-117c-4a10-a7f6-28331bc4ae84","Type":"ContainerStarted","Data":"b70e86e58bd3f4ffd5fc1ef25896c04958fad3c6add1d6b0fdddace71ff66be8"} Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.461082 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86fbff885-vnwng"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.481470 4848 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.491104 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.501606 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.510434 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-p6z9h"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.613832 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.958732 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:05:43 crc kubenswrapper[4848]: I0128 13:05:43.997529 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 13:05:44 crc kubenswrapper[4848]: I0128 13:05:44.053216 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Jan 28 13:05:44 crc kubenswrapper[4848]: I0128 13:05:44.139306 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-59mkx"] Jan 28 13:05:44 crc kubenswrapper[4848]: I0128 13:05:44.715774 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 13:05:45 crc kubenswrapper[4848]: W0128 13:05:45.155613 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6be2776_ada1_4c48_9588_9e488283ee6e.slice/crio-edbaa59f957bf2e0bba233789d74da933f88cb587e9303c2e2bc9bc9b393f01a WatchSource:0}: Error finding container edbaa59f957bf2e0bba233789d74da933f88cb587e9303c2e2bc9bc9b393f01a: Status 404 returned error can't find the container with id edbaa59f957bf2e0bba233789d74da933f88cb587e9303c2e2bc9bc9b393f01a Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.242303 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"ff062566-cfd3-4393-b794-695d3473ef1a","Type":"ContainerStarted","Data":"81f49817597cfe30745dced94cc2f149152e1b0ff5217abd2decde41a801f3cc"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.244571 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerStarted","Data":"3926ecd805d8e437cc5991528dbdf2046d9498450b6ff199bab4ba65d8f0dd57"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.246156 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-59mkx" event={"ID":"49e7ea17-ef5b-4403-ad09-3553928c90e3","Type":"ContainerStarted","Data":"33029b3c16da2cdde63d49f13729351585c45cbc08b92d855d35bfb080d5385c"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.248259 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" event={"ID":"675a7fe6-6f96-4af1-9881-6be45e190425","Type":"ContainerStarted","Data":"ac008858591add370598db62ca6930abd10043935f176e371b643901426e21ae"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.249568 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc","Type":"ContainerStarted","Data":"09533eea60ac1921c7f46ac4efe44f9d4278aafdba4f84660588c96505893f89"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.251617 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" event={"ID":"ed831fad-154d-46e3-b8fa-67dd41030c18","Type":"ContainerDied","Data":"7f210f1741ffcc37943d64ae136f58e3d0b2c65899612f9e555799daed60b858"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.251649 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f210f1741ffcc37943d64ae136f58e3d0b2c65899612f9e555799daed60b858" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.252893 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6be2776-ada1-4c48-9588-9e488283ee6e","Type":"ContainerStarted","Data":"edbaa59f957bf2e0bba233789d74da933f88cb587e9303c2e2bc9bc9b393f01a"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.254703 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.255275 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"38cd06a1-9204-4a3f-bb28-9227a8023af9","Type":"ContainerStarted","Data":"95c5995bcd1f24656a5cff0a5a83b6b3b043af681ed6ce51294e577ae93afa2f"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.256270 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3face43f-5a30-4c86-b004-3a98bb508b55","Type":"ContainerStarted","Data":"18ecd8de5d66365acdde13c7c5505afc1965272f12985446e85172503ded62d4"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.258457 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" event={"ID":"4d2a4c68-7925-4e6b-befe-032010898a54","Type":"ContainerDied","Data":"e55671a86832a0824223deb50dcc14bf777d686458628a11e8ee5b33e6145124"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.258503 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f7c4fd555-m2fgj" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.258530 4848 scope.go:117] "RemoveContainer" containerID="4ec3dbae46d8b3faf3a72acfa41fb4c5b8b4a77d1aab01879ed90a1520fbc6ef" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.262495 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h" event={"ID":"77e3e961-2cae-4bee-b73a-40336940b35c","Type":"ContainerStarted","Data":"def42b72fc14fae9bbeda29a8dc86d997958a90709b951e902a32dbd184be81e"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.271337 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86fbff885-vnwng" event={"ID":"382bff15-0b1d-495c-be83-5f742696eb21","Type":"ContainerStarted","Data":"1ccdc41c62fe5a46e1ff325b2217313c14aa6e8a2f362c266685433ec9c220a2"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.276741 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.279940 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" event={"ID":"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2","Type":"ContainerDied","Data":"69f78a25f0b078d517b84c88e5b1fc8e3070d5ff4dc0aa9e92b8649ab10f55c1"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.279973 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69f78a25f0b078d517b84c88e5b1fc8e3070d5ff4dc0aa9e92b8649ab10f55c1" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.281996 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"59144d8e-c7a9-442f-bcc3-585322a77a97","Type":"ContainerStarted","Data":"9a6f5a079cc738a3735c26b150458069b8857b6a574a82edbd1cbb083d6eea6b"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.284107 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9069c6ac-fe99-41c7-8ee1-0154d87e506c","Type":"ContainerStarted","Data":"fd375ffabd8d2cb2d2c08099cd4b9bf955dceba32ca9edee03b2cd15153a5029"} Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.290635 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r" Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409152 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-config\") pod \"ed831fad-154d-46e3-b8fa-67dd41030c18\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409240 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9rxp\" (UniqueName: \"kubernetes.io/projected/ed831fad-154d-46e3-b8fa-67dd41030c18-kube-api-access-j9rxp\") pod \"ed831fad-154d-46e3-b8fa-67dd41030c18\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409389 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-dns-svc\") pod \"ed831fad-154d-46e3-b8fa-67dd41030c18\" (UID: \"ed831fad-154d-46e3-b8fa-67dd41030c18\") " Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409536 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-dns-svc\") pod \"4d2a4c68-7925-4e6b-befe-032010898a54\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409625 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-config\") pod \"4d2a4c68-7925-4e6b-befe-032010898a54\" (UID: \"4d2a4c68-7925-4e6b-befe-032010898a54\") " Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409702 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lv6h\" (UniqueName: \"kubernetes.io/projected/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-kube-api-access-9lv6h\") pod \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") " Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.409788 
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.410002 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-config\") pod \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\" (UID: \"7d6b464f-beb3-4c1c-a1f8-473e806ea1c2\") "
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.410046 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ed831fad-154d-46e3-b8fa-67dd41030c18" (UID: "ed831fad-154d-46e3-b8fa-67dd41030c18"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.410602 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-config" (OuterVolumeSpecName: "config") pod "7d6b464f-beb3-4c1c-a1f8-473e806ea1c2" (UID: "7d6b464f-beb3-4c1c-a1f8-473e806ea1c2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.410980 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-config" (OuterVolumeSpecName: "config") pod "ed831fad-154d-46e3-b8fa-67dd41030c18" (UID: "ed831fad-154d-46e3-b8fa-67dd41030c18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.412308 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.412356 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.412372 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed831fad-154d-46e3-b8fa-67dd41030c18-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.416512 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d2a4c68-7925-4e6b-befe-032010898a54-kube-api-access-b2jtc" (OuterVolumeSpecName: "kube-api-access-b2jtc") pod "4d2a4c68-7925-4e6b-befe-032010898a54" (UID: "4d2a4c68-7925-4e6b-befe-032010898a54"). InnerVolumeSpecName "kube-api-access-b2jtc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.416840 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-kube-api-access-9lv6h" (OuterVolumeSpecName: "kube-api-access-9lv6h") pod "7d6b464f-beb3-4c1c-a1f8-473e806ea1c2" (UID: "7d6b464f-beb3-4c1c-a1f8-473e806ea1c2"). InnerVolumeSpecName "kube-api-access-9lv6h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.419362 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed831fad-154d-46e3-b8fa-67dd41030c18-kube-api-access-j9rxp" (OuterVolumeSpecName: "kube-api-access-j9rxp") pod "ed831fad-154d-46e3-b8fa-67dd41030c18" (UID: "ed831fad-154d-46e3-b8fa-67dd41030c18"). InnerVolumeSpecName "kube-api-access-j9rxp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.435135 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-config" (OuterVolumeSpecName: "config") pod "4d2a4c68-7925-4e6b-befe-032010898a54" (UID: "4d2a4c68-7925-4e6b-befe-032010898a54"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.441695 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d2a4c68-7925-4e6b-befe-032010898a54" (UID: "4d2a4c68-7925-4e6b-befe-032010898a54"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.514622 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.514679 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lv6h\" (UniqueName: \"kubernetes.io/projected/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2-kube-api-access-9lv6h\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.514693 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2jtc\" (UniqueName: \"kubernetes.io/projected/4d2a4c68-7925-4e6b-befe-032010898a54-kube-api-access-b2jtc\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.514706 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9rxp\" (UniqueName: \"kubernetes.io/projected/ed831fad-154d-46e3-b8fa-67dd41030c18-kube-api-access-j9rxp\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.514717 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d2a4c68-7925-4e6b-befe-032010898a54-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.620428 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f7c4fd555-m2fgj"]
Jan 28 13:05:45 crc kubenswrapper[4848]: I0128 13:05:45.632671 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f7c4fd555-m2fgj"]
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.298280 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57b9d58665-cgh5r"
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.298343 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb9bf987-6j72h"
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.366472 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57b9d58665-cgh5r"]
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.374431 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57b9d58665-cgh5r"]
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.406193 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb9bf987-6j72h"]
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.413751 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bb9bf987-6j72h"]
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.863357 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d2a4c68-7925-4e6b-befe-032010898a54" path="/var/lib/kubelet/pods/4d2a4c68-7925-4e6b-befe-032010898a54/volumes"
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.863942 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d6b464f-beb3-4c1c-a1f8-473e806ea1c2" path="/var/lib/kubelet/pods/7d6b464f-beb3-4c1c-a1f8-473e806ea1c2/volumes"
Jan 28 13:05:46 crc kubenswrapper[4848]: I0128 13:05:46.864372 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed831fad-154d-46e3-b8fa-67dd41030c18" path="/var/lib/kubelet/pods/ed831fad-154d-46e3-b8fa-67dd41030c18/volumes"
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.375317 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3face43f-5a30-4c86-b004-3a98bb508b55","Type":"ContainerStarted","Data":"8263b98c5f5d0fc7f963ad89ff00e21b344b669e4b0761cf22206a794c8821a7"}
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.378514 4848 generic.go:334] "Generic (PLEG): container finished" podID="382bff15-0b1d-495c-be83-5f742696eb21" containerID="274adb7efa2d290dff4b6ffda85f305649326be0010fcbbf68bef6209969a298" exitCode=0
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.378609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86fbff885-vnwng" event={"ID":"382bff15-0b1d-495c-be83-5f742696eb21","Type":"ContainerDied","Data":"274adb7efa2d290dff4b6ffda85f305649326be0010fcbbf68bef6209969a298"}
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.381213 4848 generic.go:334] "Generic (PLEG): container finished" podID="675a7fe6-6f96-4af1-9881-6be45e190425" containerID="694c16265425daf695e072408e6b804f3029180a1fcb86532482a1fd45a5c061" exitCode=0
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.381323 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" event={"ID":"675a7fe6-6f96-4af1-9881-6be45e190425","Type":"ContainerDied","Data":"694c16265425daf695e072408e6b804f3029180a1fcb86532482a1fd45a5c061"}
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.383287 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"7e5a41be-973a-4b25-991f-ccbdef21b343","Type":"ContainerStarted","Data":"5af2ab1f619ebb9b6110c967e378cb36f3af02c65eb6060f9abbcbd75c22abdd"}
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.383728 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.386884 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc","Type":"ContainerStarted","Data":"6f654a704553b1e6c543f48c612708d97447fc18bcccb3c076f332f8f3ebf69d"}
event={"ID":"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc","Type":"ContainerStarted","Data":"6f654a704553b1e6c543f48c612708d97447fc18bcccb3c076f332f8f3ebf69d"} Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.402119 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4d631c7a-117c-4a10-a7f6-28331bc4ae84","Type":"ContainerStarted","Data":"b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc"} Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.402734 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.497124 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=21.835403036 podStartE2EDuration="26.497099324s" podCreationTimestamp="2026-01-28 13:05:28 +0000 UTC" firstStartedPulling="2026-01-28 13:05:41.379653009 +0000 UTC m=+1168.291870057" lastFinishedPulling="2026-01-28 13:05:46.041349317 +0000 UTC m=+1172.953566345" observedRunningTime="2026-01-28 13:05:54.492751984 +0000 UTC m=+1181.404969022" watchObservedRunningTime="2026-01-28 13:05:54.497099324 +0000 UTC m=+1181.409316362" Jan 28 13:05:54 crc kubenswrapper[4848]: I0128 13:05:54.535461 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=13.864184534 podStartE2EDuration="24.535436589s" podCreationTimestamp="2026-01-28 13:05:30 +0000 UTC" firstStartedPulling="2026-01-28 13:05:42.351007157 +0000 UTC m=+1169.263224215" lastFinishedPulling="2026-01-28 13:05:53.022259232 +0000 UTC m=+1179.934476270" observedRunningTime="2026-01-28 13:05:54.522596565 +0000 UTC m=+1181.434813613" watchObservedRunningTime="2026-01-28 13:05:54.535436589 +0000 UTC m=+1181.447653627" Jan 28 13:05:55 crc kubenswrapper[4848]: I0128 13:05:55.413512 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"ff062566-cfd3-4393-b794-695d3473ef1a","Type":"ContainerStarted","Data":"3409854ac4ba76e447d80336a8d88001fb03843e9122063cd66cfa7a1c819997"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.430130 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9069c6ac-fe99-41c7-8ee1-0154d87e506c","Type":"ContainerStarted","Data":"a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.442960 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"59144d8e-c7a9-442f-bcc3-585322a77a97","Type":"ContainerStarted","Data":"c164f1b9319fa3bad25badff0a181d54d8c66389d8e8ac25ed22045e8331bc26"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.447059 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" event={"ID":"675a7fe6-6f96-4af1-9881-6be45e190425","Type":"ContainerStarted","Data":"6128a1c0c3f0bb5a87cb910df248a66b5b79596306b79b21bcccf1c13e1efe44"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.447697 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.457444 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"b6be2776-ada1-4c48-9588-9e488283ee6e","Type":"ContainerStarted","Data":"5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.472629 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerStarted","Data":"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.475412 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-59mkx" event={"ID":"49e7ea17-ef5b-4403-ad09-3553928c90e3","Type":"ContainerStarted","Data":"25a28e2a526b14bdaf4df79b2cb38e5eaf83f80ed531acd0ff077ff9338780fd"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.488344 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" podStartSLOduration=32.488312065 podStartE2EDuration="32.488312065s" podCreationTimestamp="2026-01-28 13:05:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:05:56.484193251 +0000 UTC m=+1183.396410289" watchObservedRunningTime="2026-01-28 13:05:56.488312065 +0000 UTC m=+1183.400529123" Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.502950 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h" event={"ID":"77e3e961-2cae-4bee-b73a-40336940b35c","Type":"ContainerStarted","Data":"e9831d2b6936d68780ce984e351dfc4f445d6768bb5ac2a201e2981d146189ec"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.503153 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-p6z9h" Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.508563 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"38cd06a1-9204-4a3f-bb28-9227a8023af9","Type":"ContainerStarted","Data":"6382814419ce66001318e0a372345c505758563ca72e60954208210948a74f3f"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.513237 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86fbff885-vnwng" event={"ID":"382bff15-0b1d-495c-be83-5f742696eb21","Type":"ContainerStarted","Data":"e813f2ee37e8590f124e904ec232db6009611c3c33cad783548a8eda0815051f"} Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.513536 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.665046 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86fbff885-vnwng" podStartSLOduration=32.665015373 podStartE2EDuration="32.665015373s" podCreationTimestamp="2026-01-28 13:05:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:05:56.658792422 +0000 UTC m=+1183.571009460" watchObservedRunningTime="2026-01-28 13:05:56.665015373 +0000 UTC m=+1183.577232411" Jan 28 13:05:56 crc kubenswrapper[4848]: I0128 13:05:56.743087 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-p6z9h" podStartSLOduration=14.700974335 podStartE2EDuration="22.743060669s" podCreationTimestamp="2026-01-28 13:05:34 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.131049277 +0000 UTC 
m=+1172.043266315" lastFinishedPulling="2026-01-28 13:05:53.173135611 +0000 UTC m=+1180.085352649" observedRunningTime="2026-01-28 13:05:56.728465217 +0000 UTC m=+1183.640682255" watchObservedRunningTime="2026-01-28 13:05:56.743060669 +0000 UTC m=+1183.655277707" Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.526413 4848 generic.go:334] "Generic (PLEG): container finished" podID="49e7ea17-ef5b-4403-ad09-3553928c90e3" containerID="25a28e2a526b14bdaf4df79b2cb38e5eaf83f80ed531acd0ff077ff9338780fd" exitCode=0 Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.526683 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-59mkx" event={"ID":"49e7ea17-ef5b-4403-ad09-3553928c90e3","Type":"ContainerDied","Data":"25a28e2a526b14bdaf4df79b2cb38e5eaf83f80ed531acd0ff077ff9338780fd"} Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.527131 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-59mkx" event={"ID":"49e7ea17-ef5b-4403-ad09-3553928c90e3","Type":"ContainerStarted","Data":"2c2d66a481161c6df19fe3d5368fda249dbf26a5559fe30fdfc62acef2f3054a"} Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.527161 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-59mkx" event={"ID":"49e7ea17-ef5b-4403-ad09-3553928c90e3","Type":"ContainerStarted","Data":"5180fb5a24bbaee570166d405ae504b41fb90bcd26c5bca6c463b50d6aa248b6"} Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.527229 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.527319 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:05:57 crc kubenswrapper[4848]: I0128 13:05:57.558151 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-59mkx" podStartSLOduration=15.699696596 podStartE2EDuration="23.55812897s" podCreationTimestamp="2026-01-28 13:05:34 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.163863989 +0000 UTC m=+1172.076081027" lastFinishedPulling="2026-01-28 13:05:53.022296363 +0000 UTC m=+1179.934513401" observedRunningTime="2026-01-28 13:05:57.552611589 +0000 UTC m=+1184.464828647" watchObservedRunningTime="2026-01-28 13:05:57.55812897 +0000 UTC m=+1184.470346008" Jan 28 13:05:59 crc kubenswrapper[4848]: I0128 13:05:59.024385 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 28 13:05:59 crc kubenswrapper[4848]: I0128 13:05:59.555082 4848 generic.go:334] "Generic (PLEG): container finished" podID="ee209e0b-96f8-46ef-b1ff-2fac23c03ecc" containerID="6f654a704553b1e6c543f48c612708d97447fc18bcccb3c076f332f8f3ebf69d" exitCode=0 Jan 28 13:05:59 crc kubenswrapper[4848]: I0128 13:05:59.555197 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc","Type":"ContainerDied","Data":"6f654a704553b1e6c543f48c612708d97447fc18bcccb3c076f332f8f3ebf69d"} Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.009494 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.120082 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf86d7f99-lkd9s"] Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.120432 4848 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" containerName="dnsmasq-dns" containerID="cri-o://6128a1c0c3f0bb5a87cb910df248a66b5b79596306b79b21bcccf1c13e1efe44" gracePeriod=10
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.125441 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s"
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.570378 4848 generic.go:334] "Generic (PLEG): container finished" podID="675a7fe6-6f96-4af1-9881-6be45e190425" containerID="6128a1c0c3f0bb5a87cb910df248a66b5b79596306b79b21bcccf1c13e1efe44" exitCode=0
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.570764 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" event={"ID":"675a7fe6-6f96-4af1-9881-6be45e190425","Type":"ContainerDied","Data":"6128a1c0c3f0bb5a87cb910df248a66b5b79596306b79b21bcccf1c13e1efe44"}
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.574788 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ee209e0b-96f8-46ef-b1ff-2fac23c03ecc","Type":"ContainerStarted","Data":"c028ce0d9549f9f727ff7ff68559a92a8b33118dcc4aa5f8d2c7442cc0ef2018"}
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.580023 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"38cd06a1-9204-4a3f-bb28-9227a8023af9","Type":"ContainerStarted","Data":"46c09ba9f8ccd9f33f9dc404006eb1d9d865b0620cf051627101b0093f94de77"}
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.583808 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"59144d8e-c7a9-442f-bcc3-585322a77a97","Type":"ContainerStarted","Data":"13dfdf00c1f3ae503bd4229e284ff4db20fec83b75bc9f86932825e79ce3a328"}
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.587120 4848 generic.go:334] "Generic (PLEG): container finished" podID="3face43f-5a30-4c86-b004-3a98bb508b55" containerID="8263b98c5f5d0fc7f963ad89ff00e21b344b669e4b0761cf22206a794c8821a7" exitCode=0
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.587171 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3face43f-5a30-4c86-b004-3a98bb508b55","Type":"ContainerDied","Data":"8263b98c5f5d0fc7f963ad89ff00e21b344b669e4b0761cf22206a794c8821a7"}
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.600942 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=27.55554237 podStartE2EDuration="35.600914764s" podCreationTimestamp="2026-01-28 13:05:25 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.1497059 +0000 UTC m=+1172.061922938" lastFinishedPulling="2026-01-28 13:05:53.195078294 +0000 UTC m=+1180.107295332" observedRunningTime="2026-01-28 13:06:00.600225475 +0000 UTC m=+1187.512442513" watchObservedRunningTime="2026-01-28 13:06:00.600914764 +0000 UTC m=+1187.513131802"
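The "Killing container with a grace period" entry above (gracePeriod=10) is the kubelet asking the runtime to stop the dnsmasq container gently before forcing it: deliver SIGTERM, wait up to the grace period for the process to exit, then escalate to SIGKILL. A rough sketch of that escalation; this is not CRI-O's implementation, and the helper and pid below are hypothetical:

    package main

    import (
        "os"
        "syscall"
        "time"
    )

    // stopWithGrace signals a process with SIGTERM, polls for exit, and
    // sends SIGKILL only if the grace period elapses first.
    func stopWithGrace(pid int, grace time.Duration) error {
        p, err := os.FindProcess(pid) // always succeeds on Unix
        if err != nil {
            return err
        }
        if err := p.Signal(syscall.SIGTERM); err != nil {
            return err
        }
        deadline := time.Now().Add(grace)
        for time.Now().Before(deadline) {
            // Signal 0 delivers nothing; an error means the process is gone.
            if p.Signal(syscall.Signal(0)) != nil {
                return nil
            }
            time.Sleep(100 * time.Millisecond)
        }
        return p.Signal(syscall.SIGKILL)
    }

    func main() {
        _ = stopWithGrace(12345, 10*time.Second) // pid 12345 is made up
    }

Here the escalation never fires: the container exits on its own within about half a second (the ContainerDied event above reports exitCode=0), well inside the 10-second grace period.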
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.658356 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=10.112778179 podStartE2EDuration="24.658334982s" podCreationTimestamp="2026-01-28 13:05:36 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.133288428 +0000 UTC m=+1172.045505466" lastFinishedPulling="2026-01-28 13:05:59.678845241 +0000 UTC m=+1186.591062269" observedRunningTime="2026-01-28 13:06:00.644524963 +0000 UTC m=+1187.556742011" watchObservedRunningTime="2026-01-28 13:06:00.658334982 +0000 UTC m=+1187.570552020"
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.675111 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=12.162484167 podStartE2EDuration="26.675089373s" podCreationTimestamp="2026-01-28 13:05:34 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.136524938 +0000 UTC m=+1172.048741976" lastFinishedPulling="2026-01-28 13:05:59.649130144 +0000 UTC m=+1186.561347182" observedRunningTime="2026-01-28 13:06:00.668461651 +0000 UTC m=+1187.580678739" watchObservedRunningTime="2026-01-28 13:06:00.675089373 +0000 UTC m=+1187.587306411"
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.743586 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s"
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.806693 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-config\") pod \"675a7fe6-6f96-4af1-9881-6be45e190425\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") "
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.806767 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46zr7\" (UniqueName: \"kubernetes.io/projected/675a7fe6-6f96-4af1-9881-6be45e190425-kube-api-access-46zr7\") pod \"675a7fe6-6f96-4af1-9881-6be45e190425\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") "
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.806842 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-dns-svc\") pod \"675a7fe6-6f96-4af1-9881-6be45e190425\" (UID: \"675a7fe6-6f96-4af1-9881-6be45e190425\") "
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.817293 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/675a7fe6-6f96-4af1-9881-6be45e190425-kube-api-access-46zr7" (OuterVolumeSpecName: "kube-api-access-46zr7") pod "675a7fe6-6f96-4af1-9881-6be45e190425" (UID: "675a7fe6-6f96-4af1-9881-6be45e190425"). InnerVolumeSpecName "kube-api-access-46zr7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.872698 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "675a7fe6-6f96-4af1-9881-6be45e190425" (UID: "675a7fe6-6f96-4af1-9881-6be45e190425"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.881955 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-config" (OuterVolumeSpecName: "config") pod "675a7fe6-6f96-4af1-9881-6be45e190425" (UID: "675a7fe6-6f96-4af1-9881-6be45e190425"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.910079 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.913007 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46zr7\" (UniqueName: \"kubernetes.io/projected/675a7fe6-6f96-4af1-9881-6be45e190425-kube-api-access-46zr7\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:00 crc kubenswrapper[4848]: I0128 13:06:00.913026 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675a7fe6-6f96-4af1-9881-6be45e190425-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.011363 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.095307 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.108611 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc455bbff-z8zr4"] Jan 28 13:06:01 crc kubenswrapper[4848]: E0128 13:06:01.109111 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" containerName="dnsmasq-dns" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.109130 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" containerName="dnsmasq-dns" Jan 28 13:06:01 crc kubenswrapper[4848]: E0128 13:06:01.109148 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d2a4c68-7925-4e6b-befe-032010898a54" containerName="init" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.109155 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d2a4c68-7925-4e6b-befe-032010898a54" containerName="init" Jan 28 13:06:01 crc kubenswrapper[4848]: E0128 13:06:01.109180 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" containerName="init" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.109189 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" containerName="init" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.109425 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d2a4c68-7925-4e6b-befe-032010898a54" containerName="init" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.109454 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" containerName="dnsmasq-dns" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.110443 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.181160 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc455bbff-z8zr4"] Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.217532 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx974\" (UniqueName: \"kubernetes.io/projected/c7022cdb-e9ff-4e21-a066-f6329ceb6463-kube-api-access-jx974\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.217667 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-config\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.217702 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-dns-svc\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.319170 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-dns-svc\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.319281 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx974\" (UniqueName: \"kubernetes.io/projected/c7022cdb-e9ff-4e21-a066-f6329ceb6463-kube-api-access-jx974\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.319366 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-config\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.320430 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-config\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.320762 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-dns-svc\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.345761 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx974\" (UniqueName: 
\"kubernetes.io/projected/c7022cdb-e9ff-4e21-a066-f6329ceb6463-kube-api-access-jx974\") pod \"dnsmasq-dns-6bc455bbff-z8zr4\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.444966 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.628104 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" event={"ID":"675a7fe6-6f96-4af1-9881-6be45e190425","Type":"ContainerDied","Data":"ac008858591add370598db62ca6930abd10043935f176e371b643901426e21ae"} Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.628574 4848 scope.go:117] "RemoveContainer" containerID="6128a1c0c3f0bb5a87cb910df248a66b5b79596306b79b21bcccf1c13e1efe44" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.628927 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf86d7f99-lkd9s" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.635704 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3face43f-5a30-4c86-b004-3a98bb508b55","Type":"ContainerStarted","Data":"8051d741653890f299d103fb29c325026af7d246a081580aa565f66557547b43"} Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.688205 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=26.747683469000002 podStartE2EDuration="34.688174799s" podCreationTimestamp="2026-01-28 13:05:27 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.198842381 +0000 UTC m=+1172.111059419" lastFinishedPulling="2026-01-28 13:05:53.139333711 +0000 UTC m=+1180.051550749" observedRunningTime="2026-01-28 13:06:01.679641504 +0000 UTC m=+1188.591858542" watchObservedRunningTime="2026-01-28 13:06:01.688174799 +0000 UTC m=+1188.600391837" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.688615 4848 scope.go:117] "RemoveContainer" containerID="694c16265425daf695e072408e6b804f3029180a1fcb86532482a1fd45a5c061" Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.744481 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf86d7f99-lkd9s"] Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.765358 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bf86d7f99-lkd9s"] Jan 28 13:06:01 crc kubenswrapper[4848]: I0128 13:06:01.981240 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc455bbff-z8zr4"] Jan 28 13:06:02 crc kubenswrapper[4848]: W0128 13:06:02.068788 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7022cdb_e9ff_4e21_a066_f6329ceb6463.slice/crio-e2661bf315707fae32567e5af9b6dfe288a6a99feb05af053a0630838a7e7369 WatchSource:0}: Error finding container e2661bf315707fae32567e5af9b6dfe288a6a99feb05af053a0630838a7e7369: Status 404 returned error can't find the container with id e2661bf315707fae32567e5af9b6dfe288a6a99feb05af053a0630838a7e7369 Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.289131 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.298499 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.301718 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.302039 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-czzmg" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.302347 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.302555 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.321975 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.353153 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.413631 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.443035 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af520475-92ee-41e6-90e1-7ad3d9609d51-cache\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.443471 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af520475-92ee-41e6-90e1-7ad3d9609d51-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.443606 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af520475-92ee-41e6-90e1-7ad3d9609d51-lock\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.443659 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.443828 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5rj7\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-kube-api-access-n5rj7\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.443947 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.545626 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af520475-92ee-41e6-90e1-7ad3d9609d51-cache\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546176 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af520475-92ee-41e6-90e1-7ad3d9609d51-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546220 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af520475-92ee-41e6-90e1-7ad3d9609d51-lock\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546267 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546277 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/af520475-92ee-41e6-90e1-7ad3d9609d51-cache\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546326 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5rj7\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-kube-api-access-n5rj7\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546385 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: E0128 13:06:02.546641 4848 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 13:06:02 crc kubenswrapper[4848]: E0128 13:06:02.546679 4848 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546645 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.546887 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/af520475-92ee-41e6-90e1-7ad3d9609d51-lock\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:02 crc kubenswrapper[4848]: E0128 13:06:02.546757 4848 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift podName:af520475-92ee-41e6-90e1-7ad3d9609d51 nodeName:}" failed. No retries permitted until 2026-01-28 13:06:03.046725865 +0000 UTC m=+1189.958942903 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift") pod "swift-storage-0" (UID: "af520475-92ee-41e6-90e1-7ad3d9609d51") : configmap "swift-ring-files" not found
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.554405 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af520475-92ee-41e6-90e1-7ad3d9609d51-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0"
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.601273 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5rj7\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-kube-api-access-n5rj7\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0"
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.606568 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0"
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.669405 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" event={"ID":"c7022cdb-e9ff-4e21-a066-f6329ceb6463","Type":"ContainerStarted","Data":"e2661bf315707fae32567e5af9b6dfe288a6a99feb05af053a0630838a7e7369"}
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.680472 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.729701 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Jan 28 13:06:02 crc kubenswrapper[4848]: I0128 13:06:02.869637 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="675a7fe6-6f96-4af1-9881-6be45e190425" path="/var/lib/kubelet/pods/675a7fe6-6f96-4af1-9881-6be45e190425/volumes"
Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.011863 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.060075 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0"
Jan 28 13:06:03 crc kubenswrapper[4848]: E0128 13:06:03.060299 4848 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 28 13:06:03 crc kubenswrapper[4848]: E0128 13:06:03.060315 4848 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
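The etc-swift mount cannot complete because the projected volume sources the swift-ring-files configmap, which does not exist yet. Each failed MountVolume.SetUp is rescheduled by nestedpendingoperations with a doubling delay: durationBeforeRetry 500ms above, then 1s and 2s in the retries that follow. A minimal sketch of that doubling policy; the constants and the helper name are assumptions for illustration, not the kubelet's actual backoff code:

    package main

    import (
        "fmt"
        "time"
    )

    // Illustrative doubling policy matching the 500ms -> 1s -> 2s sequence
    // in this log; the initial delay and the cap are assumed values.
    const (
        initialDelay = 500 * time.Millisecond
        maxDelay     = 2 * time.Minute
    )

    func durationBeforeRetry(failures int) time.Duration {
        d := initialDelay
        for i := 1; i < failures; i++ {
            d *= 2
            if d > maxDelay {
                return maxDelay
            }
        }
        return d
    }

    func main() {
        for n := 1; n <= 4; n++ {
            fmt.Printf("failure %d: durationBeforeRetry=%s\n", n, durationBeforeRetry(n))
        }
        // failure 1: 500ms  failure 2: 1s  failure 3: 2s  failure 4: 4s
    }

The backoff only spaces out attempts; as soon as the configmap is published, the next scheduled retry can succeed and swift-storage-0 proceeds.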
"{volumeName:kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift podName:af520475-92ee-41e6-90e1-7ad3d9609d51 nodeName:}" failed. No retries permitted until 2026-01-28 13:06:04.060349748 +0000 UTC m=+1190.972566786 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift") pod "swift-storage-0" (UID: "af520475-92ee-41e6-90e1-7ad3d9609d51") : configmap "swift-ring-files" not found Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.072601 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wk5zd"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.086076 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.089363 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.096729 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.119374 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc455bbff-z8zr4"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.143364 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wk5zd"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.162151 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqdqs\" (UniqueName: \"kubernetes.io/projected/55d9487c-8ef4-4859-b3ca-6bd679cb1854-kube-api-access-tqdqs\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.162207 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55d9487c-8ef4-4859-b3ca-6bd679cb1854-config\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.162232 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d9487c-8ef4-4859-b3ca-6bd679cb1854-combined-ca-bundle\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.162292 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/55d9487c-8ef4-4859-b3ca-6bd679cb1854-ovn-rundir\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.162312 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/55d9487c-8ef4-4859-b3ca-6bd679cb1854-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " 
pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.162348 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/55d9487c-8ef4-4859-b3ca-6bd679cb1854-ovs-rundir\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.177234 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69b75bd887-26wjk"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.179105 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.182807 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.201749 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69b75bd887-26wjk"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.265740 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/55d9487c-8ef4-4859-b3ca-6bd679cb1854-ovs-rundir\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266206 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/55d9487c-8ef4-4859-b3ca-6bd679cb1854-ovs-rundir\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266354 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-config\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266552 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldx7q\" (UniqueName: \"kubernetes.io/projected/cc84b9bd-f71b-40a6-8117-8e465aa52286-kube-api-access-ldx7q\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266585 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqdqs\" (UniqueName: \"kubernetes.io/projected/55d9487c-8ef4-4859-b3ca-6bd679cb1854-kube-api-access-tqdqs\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266845 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55d9487c-8ef4-4859-b3ca-6bd679cb1854-config\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266883 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d9487c-8ef4-4859-b3ca-6bd679cb1854-combined-ca-bundle\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.266908 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-dns-svc\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.267200 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-ovsdbserver-nb\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.267312 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/55d9487c-8ef4-4859-b3ca-6bd679cb1854-ovn-rundir\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.268362 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55d9487c-8ef4-4859-b3ca-6bd679cb1854-config\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.268611 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/55d9487c-8ef4-4859-b3ca-6bd679cb1854-ovn-rundir\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.269485 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/55d9487c-8ef4-4859-b3ca-6bd679cb1854-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.276747 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/55d9487c-8ef4-4859-b3ca-6bd679cb1854-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.290073 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqdqs\" (UniqueName: \"kubernetes.io/projected/55d9487c-8ef4-4859-b3ca-6bd679cb1854-kube-api-access-tqdqs\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.293603 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d9487c-8ef4-4859-b3ca-6bd679cb1854-combined-ca-bundle\") pod \"ovn-controller-metrics-wk5zd\" (UID: \"55d9487c-8ef4-4859-b3ca-6bd679cb1854\") " pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.372951 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldx7q\" (UniqueName: \"kubernetes.io/projected/cc84b9bd-f71b-40a6-8117-8e465aa52286-kube-api-access-ldx7q\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.373574 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-dns-svc\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.373630 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-ovsdbserver-nb\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.373707 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-config\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.374712 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-ovsdbserver-nb\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.374904 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-config\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.375365 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-dns-svc\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.416294 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldx7q\" (UniqueName: \"kubernetes.io/projected/cc84b9bd-f71b-40a6-8117-8e465aa52286-kube-api-access-ldx7q\") pod \"dnsmasq-dns-69b75bd887-26wjk\" (UID: \"cc84b9bd-f71b-40a6-8117-8e465aa52286\") " pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.462254 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-wk5zd" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.501876 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.545307 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69b75bd887-26wjk"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.573728 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5896b68bd7-jwsfl"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.575831 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.581064 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.588975 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5896b68bd7-jwsfl"] Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.678798 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-sb\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.680012 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-nb\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.680089 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxqb5\" (UniqueName: \"kubernetes.io/projected/ed2e9f9f-591c-46cf-86b1-3c530be18542-kube-api-access-nxqb5\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.680119 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-config\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.680146 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-dns-svc\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.711185 4848 generic.go:334] "Generic (PLEG): container finished" podID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerID="822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073" exitCode=0 Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.711307 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerDied","Data":"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"} Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.724720 4848 generic.go:334] "Generic (PLEG): container finished" podID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerID="cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711" exitCode=0 Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.724873 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" event={"ID":"c7022cdb-e9ff-4e21-a066-f6329ceb6463","Type":"ContainerDied","Data":"cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711"} Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.783639 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-nb\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.783728 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxqb5\" (UniqueName: \"kubernetes.io/projected/ed2e9f9f-591c-46cf-86b1-3c530be18542-kube-api-access-nxqb5\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.783766 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-config\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.783793 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-dns-svc\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.783900 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-sb\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.785619 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-sb\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.787218 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-nb\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.789473 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-config\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.790524 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-dns-svc\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.823078 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.871466 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxqb5\" (UniqueName: \"kubernetes.io/projected/ed2e9f9f-591c-46cf-86b1-3c530be18542-kube-api-access-nxqb5\") pod \"dnsmasq-dns-5896b68bd7-jwsfl\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:03 crc kubenswrapper[4848]: I0128 13:06:03.930827 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.093728 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:04 crc kubenswrapper[4848]: E0128 13:06:04.094020 4848 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 13:06:04 crc kubenswrapper[4848]: E0128 13:06:04.094075 4848 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 13:06:04 crc kubenswrapper[4848]: E0128 13:06:04.094152 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift podName:af520475-92ee-41e6-90e1-7ad3d9609d51 nodeName:}" failed. No retries permitted until 2026-01-28 13:06:06.094125892 +0000 UTC m=+1193.006342930 (durationBeforeRetry 2s). 
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.179174 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wk5zd"]
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.205196 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69b75bd887-26wjk"]
Jan 28 13:06:04 crc kubenswrapper[4848]: W0128 13:06:04.234425 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc84b9bd_f71b_40a6_8117_8e465aa52286.slice/crio-6010f309142b1817e107b80f13af06bef4b270b5793f477fc87f07b2bb6e9c61 WatchSource:0}: Error finding container 6010f309142b1817e107b80f13af06bef4b270b5793f477fc87f07b2bb6e9c61: Status 404 returned error can't find the container with id 6010f309142b1817e107b80f13af06bef4b270b5793f477fc87f07b2bb6e9c61
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.259512 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.266146 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.270794 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.271058 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.271172 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.271408 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-jlzq7"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.272901 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303529 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303600 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31b7f744-13ea-445d-99a0-57155c52e332-scripts\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303631 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/31b7f744-13ea-445d-99a0-57155c52e332-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303675 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303737 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303763 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b7f744-13ea-445d-99a0-57155c52e332-config\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.303806 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c9vz\" (UniqueName: \"kubernetes.io/projected/31b7f744-13ea-445d-99a0-57155c52e332-kube-api-access-5c9vz\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.405960 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.406485 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b7f744-13ea-445d-99a0-57155c52e332-config\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.406537 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c9vz\" (UniqueName: \"kubernetes.io/projected/31b7f744-13ea-445d-99a0-57155c52e332-kube-api-access-5c9vz\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.406601 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.406659 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31b7f744-13ea-445d-99a0-57155c52e332-scripts\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.406691 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/31b7f744-13ea-445d-99a0-57155c52e332-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.406755 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.413410 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b7f744-13ea-445d-99a0-57155c52e332-config\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.413410 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31b7f744-13ea-445d-99a0-57155c52e332-scripts\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.413888 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/31b7f744-13ea-445d-99a0-57155c52e332-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.415798 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.418412 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.419664 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b7f744-13ea-445d-99a0-57155c52e332-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.438514 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c9vz\" (UniqueName: \"kubernetes.io/projected/31b7f744-13ea-445d-99a0-57155c52e332-kube-api-access-5c9vz\") pod \"ovn-northd-0\" (UID: \"31b7f744-13ea-445d-99a0-57155c52e332\") " pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.624971 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.682002 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5896b68bd7-jwsfl"]
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.743803 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" event={"ID":"ed2e9f9f-591c-46cf-86b1-3c530be18542","Type":"ContainerStarted","Data":"ee0f8ce6d295bf13500e24f9c603da3f3a4b3b82e2fe609b5ab60ff8e5e573f1"}
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.755638 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" event={"ID":"cc84b9bd-f71b-40a6-8117-8e465aa52286","Type":"ContainerStarted","Data":"d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05"}
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.755695 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" event={"ID":"cc84b9bd-f71b-40a6-8117-8e465aa52286","Type":"ContainerStarted","Data":"6010f309142b1817e107b80f13af06bef4b270b5793f477fc87f07b2bb6e9c61"}
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.788996 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" event={"ID":"c7022cdb-e9ff-4e21-a066-f6329ceb6463","Type":"ContainerStarted","Data":"35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013"}
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.789182 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerName="dnsmasq-dns" containerID="cri-o://35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013" gracePeriod=10
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.789462 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.824398 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wk5zd" event={"ID":"55d9487c-8ef4-4859-b3ca-6bd679cb1854","Type":"ContainerStarted","Data":"43522fb088ca5cf57198e29674532e44216a59412dbe12cd6345e536867457aa"}
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.824495 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wk5zd" event={"ID":"55d9487c-8ef4-4859-b3ca-6bd679cb1854","Type":"ContainerStarted","Data":"c41a5952785853daa741e185f8c1bd399521bcf09421be5df563adb3e5bfa7c6"}
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.866166 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" podStartSLOduration=3.866135859 podStartE2EDuration="3.866135859s" podCreationTimestamp="2026-01-28 13:06:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:04.819783565 +0000 UTC m=+1191.732000613" watchObservedRunningTime="2026-01-28 13:06:04.866135859 +0000 UTC m=+1191.778352897"
Jan 28 13:06:04 crc kubenswrapper[4848]: I0128 13:06:04.871667 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wk5zd" podStartSLOduration=1.8716531509999998 podStartE2EDuration="1.871653151s" podCreationTimestamp="2026-01-28 13:06:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:04.853543863 +0000 UTC m=+1191.765760911" watchObservedRunningTime="2026-01-28 13:06:04.871653151 +0000 UTC m=+1191.783870189"
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.728887 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-config" (OuterVolumeSpecName: "config") pod "cc84b9bd-f71b-40a6-8117-8e465aa52286" (UID: "cc84b9bd-f71b-40a6-8117-8e465aa52286"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.776970 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldx7q\" (UniqueName: \"kubernetes.io/projected/cc84b9bd-f71b-40a6-8117-8e465aa52286-kube-api-access-ldx7q\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.777386 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.777549 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.777645 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc84b9bd-f71b-40a6-8117-8e465aa52286-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.794100 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.857774 4848 generic.go:334] "Generic (PLEG): container finished" podID="cc84b9bd-f71b-40a6-8117-8e465aa52286" containerID="d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05" exitCode=0 Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.857837 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.857869 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" event={"ID":"cc84b9bd-f71b-40a6-8117-8e465aa52286","Type":"ContainerDied","Data":"d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05"} Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.857925 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69b75bd887-26wjk" event={"ID":"cc84b9bd-f71b-40a6-8117-8e465aa52286","Type":"ContainerDied","Data":"6010f309142b1817e107b80f13af06bef4b270b5793f477fc87f07b2bb6e9c61"} Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.857962 4848 scope.go:117] "RemoveContainer" containerID="d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.860991 4848 generic.go:334] "Generic (PLEG): container finished" podID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerID="35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013" exitCode=0 Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.861066 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.861091 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" event={"ID":"c7022cdb-e9ff-4e21-a066-f6329ceb6463","Type":"ContainerDied","Data":"35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013"} Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.861131 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc455bbff-z8zr4" event={"ID":"c7022cdb-e9ff-4e21-a066-f6329ceb6463","Type":"ContainerDied","Data":"e2661bf315707fae32567e5af9b6dfe288a6a99feb05af053a0630838a7e7369"} Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.863461 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"31b7f744-13ea-445d-99a0-57155c52e332","Type":"ContainerStarted","Data":"f1320210e11cddc7eb48b630a5753a688ba9a28623f6f69f3b280460dda84420"} Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.867332 4848 generic.go:334] "Generic (PLEG): container finished" podID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerID="f7ac5ba402f76312cd61ef2a1bc9c8df988c72996840e00d4695884becc0cf57" exitCode=0 Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.867459 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" event={"ID":"ed2e9f9f-591c-46cf-86b1-3c530be18542","Type":"ContainerDied","Data":"f7ac5ba402f76312cd61ef2a1bc9c8df988c72996840e00d4695884becc0cf57"} Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.921473 4848 scope.go:117] "RemoveContainer" containerID="d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05" Jan 28 13:06:05 crc kubenswrapper[4848]: E0128 13:06:05.925094 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05\": container with ID starting with d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05 not found: ID does not exist" containerID="d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.925148 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05"} err="failed to get container status \"d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05\": rpc error: code = NotFound desc = could not find container \"d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05\": container with ID starting with d40f5f9d60a03c5d7680f07bb6b3528a906173672dd7172fbe805af725e14a05 not found: ID does not exist" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.925179 4848 scope.go:117] "RemoveContainer" containerID="35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.980895 4848 scope.go:117] "RemoveContainer" containerID="cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711" Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.981200 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69b75bd887-26wjk"] Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.982818 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx974\" (UniqueName: 
\"kubernetes.io/projected/c7022cdb-e9ff-4e21-a066-f6329ceb6463-kube-api-access-jx974\") pod \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.983219 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-config\") pod \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.983418 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-dns-svc\") pod \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\" (UID: \"c7022cdb-e9ff-4e21-a066-f6329ceb6463\") " Jan 28 13:06:05 crc kubenswrapper[4848]: I0128 13:06:05.989498 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69b75bd887-26wjk"] Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.004396 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7022cdb-e9ff-4e21-a066-f6329ceb6463-kube-api-access-jx974" (OuterVolumeSpecName: "kube-api-access-jx974") pod "c7022cdb-e9ff-4e21-a066-f6329ceb6463" (UID: "c7022cdb-e9ff-4e21-a066-f6329ceb6463"). InnerVolumeSpecName "kube-api-access-jx974". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.062409 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c7022cdb-e9ff-4e21-a066-f6329ceb6463" (UID: "c7022cdb-e9ff-4e21-a066-f6329ceb6463"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.077960 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-config" (OuterVolumeSpecName: "config") pod "c7022cdb-e9ff-4e21-a066-f6329ceb6463" (UID: "c7022cdb-e9ff-4e21-a066-f6329ceb6463"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.084040 4848 scope.go:117] "RemoveContainer" containerID="35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013" Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.084637 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013\": container with ID starting with 35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013 not found: ID does not exist" containerID="35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.084688 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013"} err="failed to get container status \"35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013\": rpc error: code = NotFound desc = could not find container \"35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013\": container with ID starting with 35aa886aa520adbb33854bd2a4d36b7c680bc55abcad8e5424994eb24c333013 not found: ID does not exist" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.084723 4848 scope.go:117] "RemoveContainer" containerID="cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711" Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.085049 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711\": container with ID starting with cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711 not found: ID does not exist" containerID="cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.085077 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711"} err="failed to get container status \"cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711\": rpc error: code = NotFound desc = could not find container \"cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711\": container with ID starting with cc0fa83e634db6c48b9a28cdb969e3a3859fdbc4b6f4d2e4dde1625c3b60f711 not found: ID does not exist" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.104999 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.105117 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.105129 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7022cdb-e9ff-4e21-a066-f6329ceb6463-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.105139 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx974\" (UniqueName: 
\"kubernetes.io/projected/c7022cdb-e9ff-4e21-a066-f6329ceb6463-kube-api-access-jx974\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.105275 4848 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.105321 4848 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.105414 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift podName:af520475-92ee-41e6-90e1-7ad3d9609d51 nodeName:}" failed. No retries permitted until 2026-01-28 13:06:10.105384184 +0000 UTC m=+1197.017601402 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift") pod "swift-storage-0" (UID: "af520475-92ee-41e6-90e1-7ad3d9609d51") : configmap "swift-ring-files" not found Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.187841 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-mgbt4"] Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.188338 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerName="dnsmasq-dns" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.188364 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerName="dnsmasq-dns" Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.188408 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerName="init" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.188416 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerName="init" Jan 28 13:06:06 crc kubenswrapper[4848]: E0128 13:06:06.188431 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc84b9bd-f71b-40a6-8117-8e465aa52286" containerName="init" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.188438 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc84b9bd-f71b-40a6-8117-8e465aa52286" containerName="init" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.188611 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" containerName="dnsmasq-dns" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.188641 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc84b9bd-f71b-40a6-8117-8e465aa52286" containerName="init" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.189518 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.193315 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.193639 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.195519 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.207624 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz747\" (UniqueName: \"kubernetes.io/projected/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-kube-api-access-xz747\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.207708 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-swiftconf\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.207776 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-dispersionconf\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.207814 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-combined-ca-bundle\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.207850 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-etc-swift\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.207954 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-scripts\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.208029 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-ring-data-devices\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.214333 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-mgbt4"] Jan 28 
13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.233396 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc455bbff-z8zr4"] Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.246321 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc455bbff-z8zr4"] Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.311766 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-dispersionconf\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.311843 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-combined-ca-bundle\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.311910 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-etc-swift\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.311948 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-scripts\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.312006 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-ring-data-devices\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.312597 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz747\" (UniqueName: \"kubernetes.io/projected/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-kube-api-access-xz747\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.312678 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-swiftconf\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.312849 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-etc-swift\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.314416 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: 
\"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-ring-data-devices\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.314765 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-scripts\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.316474 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-dispersionconf\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.319050 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-combined-ca-bundle\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.320896 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-swiftconf\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.335097 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz747\" (UniqueName: \"kubernetes.io/projected/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-kube-api-access-xz747\") pod \"swift-ring-rebalance-mgbt4\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.511237 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.891288 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7022cdb-e9ff-4e21-a066-f6329ceb6463" path="/var/lib/kubelet/pods/c7022cdb-e9ff-4e21-a066-f6329ceb6463/volumes" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.896519 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc84b9bd-f71b-40a6-8117-8e465aa52286" path="/var/lib/kubelet/pods/cc84b9bd-f71b-40a6-8117-8e465aa52286/volumes" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.924305 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" event={"ID":"ed2e9f9f-591c-46cf-86b1-3c530be18542","Type":"ContainerStarted","Data":"3ae884951468079c34596a7284856a73c23a9d2189e103d63ee24e42de51e857"} Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.924721 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:06 crc kubenswrapper[4848]: I0128 13:06:06.953241 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" podStartSLOduration=3.953220645 podStartE2EDuration="3.953220645s" podCreationTimestamp="2026-01-28 13:06:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:06.951131258 +0000 UTC m=+1193.863348296" watchObservedRunningTime="2026-01-28 13:06:06.953220645 +0000 UTC m=+1193.865437683" Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.152455 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.152525 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.180865 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-mgbt4"] Jan 28 13:06:07 crc kubenswrapper[4848]: W0128 13:06:07.188867 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7ead1bd_9cbb_4b0c_adb8_bf91b66fae1d.slice/crio-9f313e07f1f5c7c08c85c0200e7b396600c6c41dd77f6ed284867b039e3a16c2 WatchSource:0}: Error finding container 9f313e07f1f5c7c08c85c0200e7b396600c6c41dd77f6ed284867b039e3a16c2: Status 404 returned error can't find the container with id 9f313e07f1f5c7c08c85c0200e7b396600c6c41dd77f6ed284867b039e3a16c2 Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.567380 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.944453 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"31b7f744-13ea-445d-99a0-57155c52e332","Type":"ContainerStarted","Data":"ebce4a45b374778c557b3d022d1c0900eb2eb592ff04ee0782d8c175c4a2d538"} Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.944511 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"31b7f744-13ea-445d-99a0-57155c52e332","Type":"ContainerStarted","Data":"1fbc8b82273ef39509ed06c53ab71d07024004299264d2a3abfa6d91a87c282e"} Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.945594 4848 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.949741 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mgbt4" event={"ID":"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d","Type":"ContainerStarted","Data":"9f313e07f1f5c7c08c85c0200e7b396600c6c41dd77f6ed284867b039e3a16c2"} Jan 28 13:06:07 crc kubenswrapper[4848]: I0128 13:06:07.969353 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.73733678 podStartE2EDuration="3.969325444s" podCreationTimestamp="2026-01-28 13:06:04 +0000 UTC" firstStartedPulling="2026-01-28 13:06:05.508727968 +0000 UTC m=+1192.420945006" lastFinishedPulling="2026-01-28 13:06:06.740716632 +0000 UTC m=+1193.652933670" observedRunningTime="2026-01-28 13:06:07.965946101 +0000 UTC m=+1194.878163139" watchObservedRunningTime="2026-01-28 13:06:07.969325444 +0000 UTC m=+1194.881542492" Jan 28 13:06:08 crc kubenswrapper[4848]: I0128 13:06:08.146057 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 28 13:06:08 crc kubenswrapper[4848]: I0128 13:06:08.493488 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 28 13:06:08 crc kubenswrapper[4848]: I0128 13:06:08.493921 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.037727 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9328-account-create-update-8vsqt"] Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.039483 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.048484 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9328-account-create-update-8vsqt"] Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.073778 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.129037 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-zk4jd"] Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.134735 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.146369 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zk4jd"] Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.177091 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-operator-scripts\") pod \"placement-9328-account-create-update-8vsqt\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.177168 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdmcm\" (UniqueName: \"kubernetes.io/projected/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-kube-api-access-jdmcm\") pod \"placement-9328-account-create-update-8vsqt\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.279589 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-operator-scripts\") pod \"placement-9328-account-create-update-8vsqt\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.279733 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdmcm\" (UniqueName: \"kubernetes.io/projected/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-kube-api-access-jdmcm\") pod \"placement-9328-account-create-update-8vsqt\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.279855 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r9gs\" (UniqueName: \"kubernetes.io/projected/17c614c4-a02a-4224-9ee5-b334451f0671-kube-api-access-7r9gs\") pod \"placement-db-create-zk4jd\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.279907 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/17c614c4-a02a-4224-9ee5-b334451f0671-operator-scripts\") pod \"placement-db-create-zk4jd\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.280547 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-operator-scripts\") pod \"placement-9328-account-create-update-8vsqt\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.323124 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdmcm\" (UniqueName: \"kubernetes.io/projected/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-kube-api-access-jdmcm\") pod \"placement-9328-account-create-update-8vsqt\" (UID: 
\"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.382642 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r9gs\" (UniqueName: \"kubernetes.io/projected/17c614c4-a02a-4224-9ee5-b334451f0671-kube-api-access-7r9gs\") pod \"placement-db-create-zk4jd\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.382712 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/17c614c4-a02a-4224-9ee5-b334451f0671-operator-scripts\") pod \"placement-db-create-zk4jd\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.383755 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/17c614c4-a02a-4224-9ee5-b334451f0671-operator-scripts\") pod \"placement-db-create-zk4jd\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.402955 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r9gs\" (UniqueName: \"kubernetes.io/projected/17c614c4-a02a-4224-9ee5-b334451f0671-kube-api-access-7r9gs\") pod \"placement-db-create-zk4jd\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.421123 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:09 crc kubenswrapper[4848]: I0128 13:06:09.469017 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:10 crc kubenswrapper[4848]: I0128 13:06:10.204593 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:10 crc kubenswrapper[4848]: E0128 13:06:10.205168 4848 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 13:06:10 crc kubenswrapper[4848]: E0128 13:06:10.205440 4848 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 13:06:10 crc kubenswrapper[4848]: E0128 13:06:10.205497 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift podName:af520475-92ee-41e6-90e1-7ad3d9609d51 nodeName:}" failed. No retries permitted until 2026-01-28 13:06:18.205477388 +0000 UTC m=+1205.117694426 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift") pod "swift-storage-0" (UID: "af520475-92ee-41e6-90e1-7ad3d9609d51") : configmap "swift-ring-files" not found Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.121109 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-j2whm"] Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.134877 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-j2whm"] Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.135035 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.225188 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfch5\" (UniqueName: \"kubernetes.io/projected/ef583ead-2fa2-4c76-9753-2dc35141fdea-kube-api-access-dfch5\") pod \"watcher-db-create-j2whm\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.225743 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef583ead-2fa2-4c76-9753-2dc35141fdea-operator-scripts\") pod \"watcher-db-create-j2whm\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.228094 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-c692-account-create-update-9lnsr"] Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.229943 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.233715 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.241179 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-c692-account-create-update-9lnsr"] Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.327522 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnxnk\" (UniqueName: \"kubernetes.io/projected/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-kube-api-access-nnxnk\") pod \"watcher-c692-account-create-update-9lnsr\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.327626 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfch5\" (UniqueName: \"kubernetes.io/projected/ef583ead-2fa2-4c76-9753-2dc35141fdea-kube-api-access-dfch5\") pod \"watcher-db-create-j2whm\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.327672 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef583ead-2fa2-4c76-9753-2dc35141fdea-operator-scripts\") pod \"watcher-db-create-j2whm\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.327702 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-operator-scripts\") pod \"watcher-c692-account-create-update-9lnsr\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.328898 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef583ead-2fa2-4c76-9753-2dc35141fdea-operator-scripts\") pod \"watcher-db-create-j2whm\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.359056 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfch5\" (UniqueName: \"kubernetes.io/projected/ef583ead-2fa2-4c76-9753-2dc35141fdea-kube-api-access-dfch5\") pod \"watcher-db-create-j2whm\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.429779 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnxnk\" (UniqueName: \"kubernetes.io/projected/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-kube-api-access-nnxnk\") pod \"watcher-c692-account-create-update-9lnsr\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.430378 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-operator-scripts\") pod 
\"watcher-c692-account-create-update-9lnsr\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.431434 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-operator-scripts\") pod \"watcher-c692-account-create-update-9lnsr\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.448652 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnxnk\" (UniqueName: \"kubernetes.io/projected/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-kube-api-access-nnxnk\") pod \"watcher-c692-account-create-update-9lnsr\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.456359 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:11 crc kubenswrapper[4848]: I0128 13:06:11.595352 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:13 crc kubenswrapper[4848]: I0128 13:06:13.933444 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:14 crc kubenswrapper[4848]: I0128 13:06:14.005139 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86fbff885-vnwng"] Jan 28 13:06:14 crc kubenswrapper[4848]: I0128 13:06:14.005392 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86fbff885-vnwng" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="dnsmasq-dns" containerID="cri-o://e813f2ee37e8590f124e904ec232db6009611c3c33cad783548a8eda0815051f" gracePeriod=10 Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.010791 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86fbff885-vnwng" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.105:5353: connect: connection refused" Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.043914 4848 generic.go:334] "Generic (PLEG): container finished" podID="382bff15-0b1d-495c-be83-5f742696eb21" containerID="e813f2ee37e8590f124e904ec232db6009611c3c33cad783548a8eda0815051f" exitCode=0 Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.044009 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86fbff885-vnwng" event={"ID":"382bff15-0b1d-495c-be83-5f742696eb21","Type":"ContainerDied","Data":"e813f2ee37e8590f124e904ec232db6009611c3c33cad783548a8eda0815051f"} Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.288207 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.434841 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.771092 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-5svk6"] Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.773196 4848 util.go:30] "No 
Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.773196 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.781736 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret"
Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.798463 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-5svk6"]
Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.932421 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5xrr\" (UniqueName: \"kubernetes.io/projected/df0bed5d-f23c-47e3-8551-7bfb95b14947-kube-api-access-x5xrr\") pod \"root-account-create-update-5svk6\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:15 crc kubenswrapper[4848]: I0128 13:06:15.932700 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df0bed5d-f23c-47e3-8551-7bfb95b14947-operator-scripts\") pod \"root-account-create-update-5svk6\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:16 crc kubenswrapper[4848]: I0128 13:06:16.034957 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df0bed5d-f23c-47e3-8551-7bfb95b14947-operator-scripts\") pod \"root-account-create-update-5svk6\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:16 crc kubenswrapper[4848]: I0128 13:06:16.035019 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5xrr\" (UniqueName: \"kubernetes.io/projected/df0bed5d-f23c-47e3-8551-7bfb95b14947-kube-api-access-x5xrr\") pod \"root-account-create-update-5svk6\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:16 crc kubenswrapper[4848]: I0128 13:06:16.036073 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df0bed5d-f23c-47e3-8551-7bfb95b14947-operator-scripts\") pod \"root-account-create-update-5svk6\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:16 crc kubenswrapper[4848]: I0128 13:06:16.056972 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5xrr\" (UniqueName: \"kubernetes.io/projected/df0bed5d-f23c-47e3-8551-7bfb95b14947-kube-api-access-x5xrr\") pod \"root-account-create-update-5svk6\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " pod="openstack/root-account-create-update-5svk6"
Jan 28 13:06:16 crc kubenswrapper[4848]: I0128 13:06:16.099793 4848 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/root-account-create-update-5svk6" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.285716 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:18 crc kubenswrapper[4848]: E0128 13:06:18.285988 4848 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 13:06:18 crc kubenswrapper[4848]: E0128 13:06:18.286220 4848 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 13:06:18 crc kubenswrapper[4848]: E0128 13:06:18.286329 4848 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift podName:af520475-92ee-41e6-90e1-7ad3d9609d51 nodeName:}" failed. No retries permitted until 2026-01-28 13:06:34.286303448 +0000 UTC m=+1221.198520486 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift") pod "swift-storage-0" (UID: "af520475-92ee-41e6-90e1-7ad3d9609d51") : configmap "swift-ring-files" not found Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.541462 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-mrvkg"] Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.542852 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.553171 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-mrvkg"] Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.634766 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-4ada-account-create-update-nj927"] Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.636272 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.640542 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.650355 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-4ada-account-create-update-nj927"] Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.694590 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzhmr\" (UniqueName: \"kubernetes.io/projected/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-kube-api-access-bzhmr\") pod \"keystone-db-create-mrvkg\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.694663 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-operator-scripts\") pod \"keystone-db-create-mrvkg\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.797304 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/39291113-86f7-4ddb-a219-f67ba93d35cb-kube-api-access-dc96w\") pod \"keystone-4ada-account-create-update-nj927\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.797390 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39291113-86f7-4ddb-a219-f67ba93d35cb-operator-scripts\") pod \"keystone-4ada-account-create-update-nj927\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.797436 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzhmr\" (UniqueName: \"kubernetes.io/projected/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-kube-api-access-bzhmr\") pod \"keystone-db-create-mrvkg\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.797479 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-operator-scripts\") pod \"keystone-db-create-mrvkg\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.798602 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-operator-scripts\") pod \"keystone-db-create-mrvkg\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.822015 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzhmr\" (UniqueName: \"kubernetes.io/projected/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-kube-api-access-bzhmr\") pod 
\"keystone-db-create-mrvkg\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.869945 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.900507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/39291113-86f7-4ddb-a219-f67ba93d35cb-kube-api-access-dc96w\") pod \"keystone-4ada-account-create-update-nj927\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.900609 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39291113-86f7-4ddb-a219-f67ba93d35cb-operator-scripts\") pod \"keystone-4ada-account-create-update-nj927\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.901560 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39291113-86f7-4ddb-a219-f67ba93d35cb-operator-scripts\") pod \"keystone-4ada-account-create-update-nj927\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.917514 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/39291113-86f7-4ddb-a219-f67ba93d35cb-kube-api-access-dc96w\") pod \"keystone-4ada-account-create-update-nj927\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:18 crc kubenswrapper[4848]: I0128 13:06:18.960007 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.677066 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.875986 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-dns-svc\") pod \"382bff15-0b1d-495c-be83-5f742696eb21\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.876720 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttsfb\" (UniqueName: \"kubernetes.io/projected/382bff15-0b1d-495c-be83-5f742696eb21-kube-api-access-ttsfb\") pod \"382bff15-0b1d-495c-be83-5f742696eb21\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.876892 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-config\") pod \"382bff15-0b1d-495c-be83-5f742696eb21\" (UID: \"382bff15-0b1d-495c-be83-5f742696eb21\") " Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.891037 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/382bff15-0b1d-495c-be83-5f742696eb21-kube-api-access-ttsfb" (OuterVolumeSpecName: "kube-api-access-ttsfb") pod "382bff15-0b1d-495c-be83-5f742696eb21" (UID: "382bff15-0b1d-495c-be83-5f742696eb21"). InnerVolumeSpecName "kube-api-access-ttsfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.948083 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-config" (OuterVolumeSpecName: "config") pod "382bff15-0b1d-495c-be83-5f742696eb21" (UID: "382bff15-0b1d-495c-be83-5f742696eb21"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.958498 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "382bff15-0b1d-495c-be83-5f742696eb21" (UID: "382bff15-0b1d-495c-be83-5f742696eb21"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.979557 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.979622 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382bff15-0b1d-495c-be83-5f742696eb21-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:19 crc kubenswrapper[4848]: I0128 13:06:19.979635 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttsfb\" (UniqueName: \"kubernetes.io/projected/382bff15-0b1d-495c-be83-5f742696eb21-kube-api-access-ttsfb\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.015520 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9328-account-create-update-8vsqt"] Jan 28 13:06:20 crc kubenswrapper[4848]: W0128 13:06:20.021972 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2dbef0d_9acf_4c11_8634_d0d8e0f88f5d.slice/crio-ad4d46e5c5356c8d9bd8f99841d13cd13b47932e5ca26760d2b8cd80236b2ea4 WatchSource:0}: Error finding container ad4d46e5c5356c8d9bd8f99841d13cd13b47932e5ca26760d2b8cd80236b2ea4: Status 404 returned error can't find the container with id ad4d46e5c5356c8d9bd8f99841d13cd13b47932e5ca26760d2b8cd80236b2ea4 Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.032656 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.095233 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerStarted","Data":"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"} Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.099189 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86fbff885-vnwng" event={"ID":"382bff15-0b1d-495c-be83-5f742696eb21","Type":"ContainerDied","Data":"1ccdc41c62fe5a46e1ff325b2217313c14aa6e8a2f362c266685433ec9c220a2"} Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.099414 4848 scope.go:117] "RemoveContainer" containerID="e813f2ee37e8590f124e904ec232db6009611c3c33cad783548a8eda0815051f" Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.099258 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86fbff885-vnwng" Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.101150 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9328-account-create-update-8vsqt" event={"ID":"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d","Type":"ContainerStarted","Data":"ad4d46e5c5356c8d9bd8f99841d13cd13b47932e5ca26760d2b8cd80236b2ea4"} Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.128136 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-j2whm"] Jan 28 13:06:20 crc kubenswrapper[4848]: W0128 13:06:20.142125 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef583ead_2fa2_4c76_9753_2dc35141fdea.slice/crio-75aad154795ea56d93e80216e9ef16b1cb183c743bef2c70465e2505ee2dd2f6 WatchSource:0}: Error finding container 75aad154795ea56d93e80216e9ef16b1cb183c743bef2c70465e2505ee2dd2f6: Status 404 returned error can't find the container with id 75aad154795ea56d93e80216e9ef16b1cb183c743bef2c70465e2505ee2dd2f6 Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.170191 4848 scope.go:117] "RemoveContainer" containerID="274adb7efa2d290dff4b6ffda85f305649326be0010fcbbf68bef6209969a298" Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.174604 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86fbff885-vnwng"] Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.185026 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86fbff885-vnwng"] Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.244330 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-4ada-account-create-update-nj927"] Jan 28 13:06:20 crc kubenswrapper[4848]: W0128 13:06:20.248630 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39291113_86f7_4ddb_a219_f67ba93d35cb.slice/crio-8deef21ce68eec41a8f4b6dcc1c7759167f76bb46880223d62882a0574853d49 WatchSource:0}: Error finding container 8deef21ce68eec41a8f4b6dcc1c7759167f76bb46880223d62882a0574853d49: Status 404 returned error can't find the container with id 8deef21ce68eec41a8f4b6dcc1c7759167f76bb46880223d62882a0574853d49 Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.259110 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-mrvkg"] Jan 28 13:06:20 crc kubenswrapper[4848]: W0128 13:06:20.267828 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1147d1b1_a4fb_4bf5_ba1a_282cae5206d3.slice/crio-bfc73b4e0de75bdbf7a0cedfde49dbf575c28c01d5cf4bf1a4fcf3dcb4fa3729 WatchSource:0}: Error finding container bfc73b4e0de75bdbf7a0cedfde49dbf575c28c01d5cf4bf1a4fcf3dcb4fa3729: Status 404 returned error can't find the container with id bfc73b4e0de75bdbf7a0cedfde49dbf575c28c01d5cf4bf1a4fcf3dcb4fa3729 Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.273169 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zk4jd"] Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.447905 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.448224 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-c692-account-create-update-9lnsr"] Jan 28 13:06:20 crc 
kubenswrapper[4848]: I0128 13:06:20.456103 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-5svk6"] Jan 28 13:06:20 crc kubenswrapper[4848]: W0128 13:06:20.461047 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf0bed5d_f23c_47e3_8551_7bfb95b14947.slice/crio-c9eedfc7fdc0e5c6e650bbc70bc2bbd761981f69eee5b7c3536c591639a22a7c WatchSource:0}: Error finding container c9eedfc7fdc0e5c6e650bbc70bc2bbd761981f69eee5b7c3536c591639a22a7c: Status 404 returned error can't find the container with id c9eedfc7fdc0e5c6e650bbc70bc2bbd761981f69eee5b7c3536c591639a22a7c Jan 28 13:06:20 crc kubenswrapper[4848]: I0128 13:06:20.867153 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="382bff15-0b1d-495c-be83-5f742696eb21" path="/var/lib/kubelet/pods/382bff15-0b1d-495c-be83-5f742696eb21/volumes" Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.149562 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-mrvkg" event={"ID":"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3","Type":"ContainerStarted","Data":"cb38b7e52f59c0c15c68c1a5b20aadda1ffc67ce2a94107da8ea6b1e8aa07d06"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.150085 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-mrvkg" event={"ID":"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3","Type":"ContainerStarted","Data":"bfc73b4e0de75bdbf7a0cedfde49dbf575c28c01d5cf4bf1a4fcf3dcb4fa3729"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.154741 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zk4jd" event={"ID":"17c614c4-a02a-4224-9ee5-b334451f0671","Type":"ContainerStarted","Data":"bcb830c61618da838f91e8173082a00a0d0e8264af4f5fa66b9d9e14f36a4aaa"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.154774 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zk4jd" event={"ID":"17c614c4-a02a-4224-9ee5-b334451f0671","Type":"ContainerStarted","Data":"f0bd3771ee5bc0f2029be8216359ca250077ff2708c0f2e4ee2e480b37e13ecd"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.156904 4848 generic.go:334] "Generic (PLEG): container finished" podID="b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" containerID="94056acd0176ebd5055fd650bb7d5ef005e92c59de8073cafe0eb28b375696f0" exitCode=0 Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.156974 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9328-account-create-update-8vsqt" event={"ID":"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d","Type":"ContainerDied","Data":"94056acd0176ebd5055fd650bb7d5ef005e92c59de8073cafe0eb28b375696f0"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.162455 4848 generic.go:334] "Generic (PLEG): container finished" podID="ef583ead-2fa2-4c76-9753-2dc35141fdea" containerID="66ee4cabd88ee13dfd685769c434a74dd50af053b3c616a8fe89511d3aed06f9" exitCode=0 Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.162559 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-j2whm" event={"ID":"ef583ead-2fa2-4c76-9753-2dc35141fdea","Type":"ContainerDied","Data":"66ee4cabd88ee13dfd685769c434a74dd50af053b3c616a8fe89511d3aed06f9"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.162576 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-j2whm" 
event={"ID":"ef583ead-2fa2-4c76-9753-2dc35141fdea","Type":"ContainerStarted","Data":"75aad154795ea56d93e80216e9ef16b1cb183c743bef2c70465e2505ee2dd2f6"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.173759 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-mrvkg" podStartSLOduration=3.1737407109999998 podStartE2EDuration="3.173740711s" podCreationTimestamp="2026-01-28 13:06:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:21.172224019 +0000 UTC m=+1208.084441067" watchObservedRunningTime="2026-01-28 13:06:21.173740711 +0000 UTC m=+1208.085957749" Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.182710 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mgbt4" event={"ID":"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d","Type":"ContainerStarted","Data":"5c970a2cdac2253a43b9e1deb3279487c65f9fd088046c3907f0606d74aa527e"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.191988 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5svk6" event={"ID":"df0bed5d-f23c-47e3-8551-7bfb95b14947","Type":"ContainerStarted","Data":"bd4995e919c1127dc4db389c37f912ca1e4ff74e8814effa22e3f5c8571e28f6"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.192050 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5svk6" event={"ID":"df0bed5d-f23c-47e3-8551-7bfb95b14947","Type":"ContainerStarted","Data":"c9eedfc7fdc0e5c6e650bbc70bc2bbd761981f69eee5b7c3536c591639a22a7c"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.202005 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4ada-account-create-update-nj927" event={"ID":"39291113-86f7-4ddb-a219-f67ba93d35cb","Type":"ContainerStarted","Data":"c82da1fb8e51d3048fc7a160a77b16bb68479aa101185dfb0d86e6e4893c1d41"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.202422 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4ada-account-create-update-nj927" event={"ID":"39291113-86f7-4ddb-a219-f67ba93d35cb","Type":"ContainerStarted","Data":"8deef21ce68eec41a8f4b6dcc1c7759167f76bb46880223d62882a0574853d49"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.248887 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c692-account-create-update-9lnsr" event={"ID":"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed","Type":"ContainerStarted","Data":"dfaae6c353997593f6249e80d22b036affd390ab264003abbdb4d81afffe9569"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.248956 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c692-account-create-update-9lnsr" event={"ID":"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed","Type":"ContainerStarted","Data":"78adb7b6194e629429bd04aad52a02fb685d5a97834c82320d77da27b7d80fce"} Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.271454 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-zk4jd" podStartSLOduration=12.271423716 podStartE2EDuration="12.271423716s" podCreationTimestamp="2026-01-28 13:06:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:21.233466613 +0000 UTC m=+1208.145683661" watchObservedRunningTime="2026-01-28 13:06:21.271423716 +0000 UTC m=+1208.183640754" 
Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.280702 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-5svk6" podStartSLOduration=6.280672081 podStartE2EDuration="6.280672081s" podCreationTimestamp="2026-01-28 13:06:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:21.271181289 +0000 UTC m=+1208.183398327" watchObservedRunningTime="2026-01-28 13:06:21.280672081 +0000 UTC m=+1208.192889119" Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.301430 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-mgbt4" podStartSLOduration=3.025603097 podStartE2EDuration="15.30138724s" podCreationTimestamp="2026-01-28 13:06:06 +0000 UTC" firstStartedPulling="2026-01-28 13:06:07.191496856 +0000 UTC m=+1194.103713894" lastFinishedPulling="2026-01-28 13:06:19.467280999 +0000 UTC m=+1206.379498037" observedRunningTime="2026-01-28 13:06:21.289438002 +0000 UTC m=+1208.201655040" watchObservedRunningTime="2026-01-28 13:06:21.30138724 +0000 UTC m=+1208.213604278" Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.319757 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-4ada-account-create-update-nj927" podStartSLOduration=3.319725635 podStartE2EDuration="3.319725635s" podCreationTimestamp="2026-01-28 13:06:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:21.310992675 +0000 UTC m=+1208.223209713" watchObservedRunningTime="2026-01-28 13:06:21.319725635 +0000 UTC m=+1208.231942673" Jan 28 13:06:21 crc kubenswrapper[4848]: I0128 13:06:21.352637 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-c692-account-create-update-9lnsr" podStartSLOduration=10.352609877999999 podStartE2EDuration="10.352609878s" podCreationTimestamp="2026-01-28 13:06:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:21.328103665 +0000 UTC m=+1208.240320703" watchObservedRunningTime="2026-01-28 13:06:21.352609878 +0000 UTC m=+1208.264826916" Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.261968 4848 generic.go:334] "Generic (PLEG): container finished" podID="82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" containerID="dfaae6c353997593f6249e80d22b036affd390ab264003abbdb4d81afffe9569" exitCode=0 Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.262179 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c692-account-create-update-9lnsr" event={"ID":"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed","Type":"ContainerDied","Data":"dfaae6c353997593f6249e80d22b036affd390ab264003abbdb4d81afffe9569"} Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.264700 4848 generic.go:334] "Generic (PLEG): container finished" podID="df0bed5d-f23c-47e3-8551-7bfb95b14947" containerID="bd4995e919c1127dc4db389c37f912ca1e4ff74e8814effa22e3f5c8571e28f6" exitCode=0 Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.264769 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5svk6" event={"ID":"df0bed5d-f23c-47e3-8551-7bfb95b14947","Type":"ContainerDied","Data":"bd4995e919c1127dc4db389c37f912ca1e4ff74e8814effa22e3f5c8571e28f6"} Jan 28 13:06:22 crc 
kubenswrapper[4848]: I0128 13:06:22.267091 4848 generic.go:334] "Generic (PLEG): container finished" podID="17c614c4-a02a-4224-9ee5-b334451f0671" containerID="bcb830c61618da838f91e8173082a00a0d0e8264af4f5fa66b9d9e14f36a4aaa" exitCode=0 Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.267147 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zk4jd" event={"ID":"17c614c4-a02a-4224-9ee5-b334451f0671","Type":"ContainerDied","Data":"bcb830c61618da838f91e8173082a00a0d0e8264af4f5fa66b9d9e14f36a4aaa"} Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.270203 4848 generic.go:334] "Generic (PLEG): container finished" podID="39291113-86f7-4ddb-a219-f67ba93d35cb" containerID="c82da1fb8e51d3048fc7a160a77b16bb68479aa101185dfb0d86e6e4893c1d41" exitCode=0 Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.270343 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4ada-account-create-update-nj927" event={"ID":"39291113-86f7-4ddb-a219-f67ba93d35cb","Type":"ContainerDied","Data":"c82da1fb8e51d3048fc7a160a77b16bb68479aa101185dfb0d86e6e4893c1d41"} Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.272776 4848 generic.go:334] "Generic (PLEG): container finished" podID="1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" containerID="cb38b7e52f59c0c15c68c1a5b20aadda1ffc67ce2a94107da8ea6b1e8aa07d06" exitCode=0 Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.272849 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-mrvkg" event={"ID":"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3","Type":"ContainerDied","Data":"cb38b7e52f59c0c15c68c1a5b20aadda1ffc67ce2a94107da8ea6b1e8aa07d06"} Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.877002 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.884227 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.962917 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-operator-scripts\") pod \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.962998 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdmcm\" (UniqueName: \"kubernetes.io/projected/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-kube-api-access-jdmcm\") pod \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\" (UID: \"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d\") " Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.963025 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef583ead-2fa2-4c76-9753-2dc35141fdea-operator-scripts\") pod \"ef583ead-2fa2-4c76-9753-2dc35141fdea\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.963130 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfch5\" (UniqueName: \"kubernetes.io/projected/ef583ead-2fa2-4c76-9753-2dc35141fdea-kube-api-access-dfch5\") pod \"ef583ead-2fa2-4c76-9753-2dc35141fdea\" (UID: \"ef583ead-2fa2-4c76-9753-2dc35141fdea\") " Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.963531 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef583ead-2fa2-4c76-9753-2dc35141fdea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ef583ead-2fa2-4c76-9753-2dc35141fdea" (UID: "ef583ead-2fa2-4c76-9753-2dc35141fdea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.964410 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" (UID: "b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.969115 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-kube-api-access-jdmcm" (OuterVolumeSpecName: "kube-api-access-jdmcm") pod "b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" (UID: "b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d"). InnerVolumeSpecName "kube-api-access-jdmcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:22 crc kubenswrapper[4848]: I0128 13:06:22.970510 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef583ead-2fa2-4c76-9753-2dc35141fdea-kube-api-access-dfch5" (OuterVolumeSpecName: "kube-api-access-dfch5") pod "ef583ead-2fa2-4c76-9753-2dc35141fdea" (UID: "ef583ead-2fa2-4c76-9753-2dc35141fdea"). InnerVolumeSpecName "kube-api-access-dfch5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.064084 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdmcm\" (UniqueName: \"kubernetes.io/projected/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-kube-api-access-jdmcm\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.064437 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef583ead-2fa2-4c76-9753-2dc35141fdea-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.064507 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfch5\" (UniqueName: \"kubernetes.io/projected/ef583ead-2fa2-4c76-9753-2dc35141fdea-kube-api-access-dfch5\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.064570 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.288670 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerStarted","Data":"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"} Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.291841 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-j2whm" event={"ID":"ef583ead-2fa2-4c76-9753-2dc35141fdea","Type":"ContainerDied","Data":"75aad154795ea56d93e80216e9ef16b1cb183c743bef2c70465e2505ee2dd2f6"} Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.291963 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75aad154795ea56d93e80216e9ef16b1cb183c743bef2c70465e2505ee2dd2f6" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.291875 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-j2whm" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.294498 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9328-account-create-update-8vsqt" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.294534 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9328-account-create-update-8vsqt" event={"ID":"b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d","Type":"ContainerDied","Data":"ad4d46e5c5356c8d9bd8f99841d13cd13b47932e5ca26760d2b8cd80236b2ea4"} Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.294611 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad4d46e5c5356c8d9bd8f99841d13cd13b47932e5ca26760d2b8cd80236b2ea4" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.652509 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.783350 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39291113-86f7-4ddb-a219-f67ba93d35cb-operator-scripts\") pod \"39291113-86f7-4ddb-a219-f67ba93d35cb\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.783557 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/39291113-86f7-4ddb-a219-f67ba93d35cb-kube-api-access-dc96w\") pod \"39291113-86f7-4ddb-a219-f67ba93d35cb\" (UID: \"39291113-86f7-4ddb-a219-f67ba93d35cb\") " Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.787210 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39291113-86f7-4ddb-a219-f67ba93d35cb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "39291113-86f7-4ddb-a219-f67ba93d35cb" (UID: "39291113-86f7-4ddb-a219-f67ba93d35cb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.791781 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39291113-86f7-4ddb-a219-f67ba93d35cb-kube-api-access-dc96w" (OuterVolumeSpecName: "kube-api-access-dc96w") pod "39291113-86f7-4ddb-a219-f67ba93d35cb" (UID: "39291113-86f7-4ddb-a219-f67ba93d35cb"). InnerVolumeSpecName "kube-api-access-dc96w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.886597 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39291113-86f7-4ddb-a219-f67ba93d35cb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.886651 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc96w\" (UniqueName: \"kubernetes.io/projected/39291113-86f7-4ddb-a219-f67ba93d35cb-kube-api-access-dc96w\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.963426 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.975255 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.980771 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:23 crc kubenswrapper[4848]: I0128 13:06:23.990979 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-5svk6" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.096638 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5xrr\" (UniqueName: \"kubernetes.io/projected/df0bed5d-f23c-47e3-8551-7bfb95b14947-kube-api-access-x5xrr\") pod \"df0bed5d-f23c-47e3-8551-7bfb95b14947\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097165 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-operator-scripts\") pod \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097206 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/17c614c4-a02a-4224-9ee5-b334451f0671-operator-scripts\") pod \"17c614c4-a02a-4224-9ee5-b334451f0671\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097266 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7r9gs\" (UniqueName: \"kubernetes.io/projected/17c614c4-a02a-4224-9ee5-b334451f0671-kube-api-access-7r9gs\") pod \"17c614c4-a02a-4224-9ee5-b334451f0671\" (UID: \"17c614c4-a02a-4224-9ee5-b334451f0671\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097309 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-operator-scripts\") pod \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097388 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df0bed5d-f23c-47e3-8551-7bfb95b14947-operator-scripts\") pod \"df0bed5d-f23c-47e3-8551-7bfb95b14947\" (UID: \"df0bed5d-f23c-47e3-8551-7bfb95b14947\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097442 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnxnk\" (UniqueName: \"kubernetes.io/projected/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-kube-api-access-nnxnk\") pod \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\" (UID: \"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.097549 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzhmr\" (UniqueName: \"kubernetes.io/projected/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-kube-api-access-bzhmr\") pod \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\" (UID: \"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3\") " Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.099206 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df0bed5d-f23c-47e3-8551-7bfb95b14947-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "df0bed5d-f23c-47e3-8551-7bfb95b14947" (UID: "df0bed5d-f23c-47e3-8551-7bfb95b14947"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.099797 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" (UID: "1147d1b1-a4fb-4bf5-ba1a-282cae5206d3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.103157 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17c614c4-a02a-4224-9ee5-b334451f0671-kube-api-access-7r9gs" (OuterVolumeSpecName: "kube-api-access-7r9gs") pod "17c614c4-a02a-4224-9ee5-b334451f0671" (UID: "17c614c4-a02a-4224-9ee5-b334451f0671"). InnerVolumeSpecName "kube-api-access-7r9gs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.103530 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17c614c4-a02a-4224-9ee5-b334451f0671-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "17c614c4-a02a-4224-9ee5-b334451f0671" (UID: "17c614c4-a02a-4224-9ee5-b334451f0671"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.103981 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" (UID: "82066dd7-1c9c-4edf-b7f9-86eac39cb7ed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.108789 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df0bed5d-f23c-47e3-8551-7bfb95b14947-kube-api-access-x5xrr" (OuterVolumeSpecName: "kube-api-access-x5xrr") pod "df0bed5d-f23c-47e3-8551-7bfb95b14947" (UID: "df0bed5d-f23c-47e3-8551-7bfb95b14947"). InnerVolumeSpecName "kube-api-access-x5xrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.113635 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-kube-api-access-nnxnk" (OuterVolumeSpecName: "kube-api-access-nnxnk") pod "82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" (UID: "82066dd7-1c9c-4edf-b7f9-86eac39cb7ed"). InnerVolumeSpecName "kube-api-access-nnxnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.129755 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-kube-api-access-bzhmr" (OuterVolumeSpecName: "kube-api-access-bzhmr") pod "1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" (UID: "1147d1b1-a4fb-4bf5-ba1a-282cae5206d3"). InnerVolumeSpecName "kube-api-access-bzhmr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.200978 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5xrr\" (UniqueName: \"kubernetes.io/projected/df0bed5d-f23c-47e3-8551-7bfb95b14947-kube-api-access-x5xrr\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201050 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201065 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/17c614c4-a02a-4224-9ee5-b334451f0671-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201078 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7r9gs\" (UniqueName: \"kubernetes.io/projected/17c614c4-a02a-4224-9ee5-b334451f0671-kube-api-access-7r9gs\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201113 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201124 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df0bed5d-f23c-47e3-8551-7bfb95b14947-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201139 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnxnk\" (UniqueName: \"kubernetes.io/projected/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed-kube-api-access-nnxnk\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.201151 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzhmr\" (UniqueName: \"kubernetes.io/projected/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3-kube-api-access-bzhmr\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.307615 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-mrvkg" event={"ID":"1147d1b1-a4fb-4bf5-ba1a-282cae5206d3","Type":"ContainerDied","Data":"bfc73b4e0de75bdbf7a0cedfde49dbf575c28c01d5cf4bf1a4fcf3dcb4fa3729"} Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.307671 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfc73b4e0de75bdbf7a0cedfde49dbf575c28c01d5cf4bf1a4fcf3dcb4fa3729" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.307692 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-mrvkg" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.310567 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5svk6" event={"ID":"df0bed5d-f23c-47e3-8551-7bfb95b14947","Type":"ContainerDied","Data":"c9eedfc7fdc0e5c6e650bbc70bc2bbd761981f69eee5b7c3536c591639a22a7c"} Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.310622 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9eedfc7fdc0e5c6e650bbc70bc2bbd761981f69eee5b7c3536c591639a22a7c" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.310542 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5svk6" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.312601 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zk4jd" event={"ID":"17c614c4-a02a-4224-9ee5-b334451f0671","Type":"ContainerDied","Data":"f0bd3771ee5bc0f2029be8216359ca250077ff2708c0f2e4ee2e480b37e13ecd"} Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.312641 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0bd3771ee5bc0f2029be8216359ca250077ff2708c0f2e4ee2e480b37e13ecd" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.312697 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zk4jd" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.314403 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4ada-account-create-update-nj927" event={"ID":"39291113-86f7-4ddb-a219-f67ba93d35cb","Type":"ContainerDied","Data":"8deef21ce68eec41a8f4b6dcc1c7759167f76bb46880223d62882a0574853d49"} Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.314435 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8deef21ce68eec41a8f4b6dcc1c7759167f76bb46880223d62882a0574853d49" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.314518 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-4ada-account-create-update-nj927" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.318401 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c692-account-create-update-9lnsr" event={"ID":"82066dd7-1c9c-4edf-b7f9-86eac39cb7ed","Type":"ContainerDied","Data":"78adb7b6194e629429bd04aad52a02fb685d5a97834c82320d77da27b7d80fce"} Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.318444 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78adb7b6194e629429bd04aad52a02fb685d5a97834c82320d77da27b7d80fce" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.318539 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-c692-account-create-update-9lnsr" Jan 28 13:06:24 crc kubenswrapper[4848]: I0128 13:06:24.692152 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 28 13:06:25 crc kubenswrapper[4848]: I0128 13:06:25.254611 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-p6z9h" podUID="77e3e961-2cae-4bee-b73a-40336940b35c" containerName="ovn-controller" probeResult="failure" output=< Jan 28 13:06:25 crc kubenswrapper[4848]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 28 13:06:25 crc kubenswrapper[4848]: > Jan 28 13:06:27 crc kubenswrapper[4848]: I0128 13:06:27.134702 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-5svk6"] Jan 28 13:06:27 crc kubenswrapper[4848]: I0128 13:06:27.144226 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-5svk6"] Jan 28 13:06:27 crc kubenswrapper[4848]: I0128 13:06:27.348139 4848 generic.go:334] "Generic (PLEG): container finished" podID="ff062566-cfd3-4393-b794-695d3473ef1a" containerID="3409854ac4ba76e447d80336a8d88001fb03843e9122063cd66cfa7a1c819997" exitCode=0 Jan 28 13:06:27 crc kubenswrapper[4848]: I0128 13:06:27.348287 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"ff062566-cfd3-4393-b794-695d3473ef1a","Type":"ContainerDied","Data":"3409854ac4ba76e447d80336a8d88001fb03843e9122063cd66cfa7a1c819997"} Jan 28 13:06:27 crc kubenswrapper[4848]: I0128 13:06:27.352413 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerStarted","Data":"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"} Jan 28 13:06:27 crc kubenswrapper[4848]: I0128 13:06:27.448714 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.362612 4848 generic.go:334] "Generic (PLEG): container finished" podID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerID="5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e" exitCode=0 Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.362728 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6be2776-ada1-4c48-9588-9e488283ee6e","Type":"ContainerDied","Data":"5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e"} Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.365219 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"ff062566-cfd3-4393-b794-695d3473ef1a","Type":"ContainerStarted","Data":"3d2e4310195427de32add74d32fd71cd1d6a48d65aeffd18accdaee82f56ad1d"} Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.366551 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.369100 4848 generic.go:334] "Generic (PLEG): container finished" podID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerID="a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24" exitCode=0 Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.369966 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"9069c6ac-fe99-41c7-8ee1-0154d87e506c","Type":"ContainerDied","Data":"a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24"} Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.389888 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=15.933368172 podStartE2EDuration="57.389859614s" podCreationTimestamp="2026-01-28 13:05:31 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.143583491 +0000 UTC m=+1172.055800529" lastFinishedPulling="2026-01-28 13:06:26.600074933 +0000 UTC m=+1213.512291971" observedRunningTime="2026-01-28 13:06:27.413812466 +0000 UTC m=+1214.326029504" watchObservedRunningTime="2026-01-28 13:06:28.389859614 +0000 UTC m=+1215.302076652" Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.421990 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=56.480726914 podStartE2EDuration="1m4.421954296s" podCreationTimestamp="2026-01-28 13:05:24 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.199227261 +0000 UTC m=+1172.111444299" lastFinishedPulling="2026-01-28 13:05:53.140454643 +0000 UTC m=+1180.052671681" observedRunningTime="2026-01-28 13:06:28.416194398 +0000 UTC m=+1215.328411456" watchObservedRunningTime="2026-01-28 13:06:28.421954296 +0000 UTC m=+1215.334171334" Jan 28 13:06:28 crc kubenswrapper[4848]: I0128 13:06:28.862854 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df0bed5d-f23c-47e3-8551-7bfb95b14947" path="/var/lib/kubelet/pods/df0bed5d-f23c-47e3-8551-7bfb95b14947/volumes" Jan 28 13:06:29 crc kubenswrapper[4848]: I0128 13:06:29.381213 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9069c6ac-fe99-41c7-8ee1-0154d87e506c","Type":"ContainerStarted","Data":"d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4"} Jan 28 13:06:29 crc kubenswrapper[4848]: I0128 13:06:29.381495 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 28 13:06:29 crc kubenswrapper[4848]: I0128 13:06:29.384047 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6be2776-ada1-4c48-9588-9e488283ee6e","Type":"ContainerStarted","Data":"ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff"} Jan 28 13:06:29 crc kubenswrapper[4848]: I0128 13:06:29.384596 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:06:29 crc kubenswrapper[4848]: I0128 13:06:29.416195 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=58.523097055 podStartE2EDuration="1m6.416171403s" podCreationTimestamp="2026-01-28 13:05:23 +0000 UTC" firstStartedPulling="2026-01-28 13:05:45.129283768 +0000 UTC m=+1172.041500816" lastFinishedPulling="2026-01-28 13:05:53.022358126 +0000 UTC m=+1179.934575164" observedRunningTime="2026-01-28 13:06:29.414320482 +0000 UTC m=+1216.326537540" watchObservedRunningTime="2026-01-28 13:06:29.416171403 +0000 UTC m=+1216.328388441" Jan 28 13:06:29 crc kubenswrapper[4848]: I0128 13:06:29.452930 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=57.486842299 podStartE2EDuration="1m5.452902163s" podCreationTimestamp="2026-01-28 13:05:24 +0000 UTC" 
firstStartedPulling="2026-01-28 13:05:45.164420824 +0000 UTC m=+1172.076637862" lastFinishedPulling="2026-01-28 13:05:53.130480688 +0000 UTC m=+1180.042697726" observedRunningTime="2026-01-28 13:06:29.445919271 +0000 UTC m=+1216.358136309" watchObservedRunningTime="2026-01-28 13:06:29.452902163 +0000 UTC m=+1216.365119201" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.258588 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-p6z9h" podUID="77e3e961-2cae-4bee-b73a-40336940b35c" containerName="ovn-controller" probeResult="failure" output=< Jan 28 13:06:30 crc kubenswrapper[4848]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 28 13:06:30 crc kubenswrapper[4848]: > Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.360661 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.380601 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-59mkx" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.419047 4848 generic.go:334] "Generic (PLEG): container finished" podID="e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" containerID="5c970a2cdac2253a43b9e1deb3279487c65f9fd088046c3907f0606d74aa527e" exitCode=0 Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.422372 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mgbt4" event={"ID":"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d","Type":"ContainerDied","Data":"5c970a2cdac2253a43b9e1deb3279487c65f9fd088046c3907f0606d74aa527e"} Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.697048 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-p6z9h-config-mc7ld"] Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.697969 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="dnsmasq-dns" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698053 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="dnsmasq-dns" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698118 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698183 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698273 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698334 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698391 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17c614c4-a02a-4224-9ee5-b334451f0671" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698462 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="17c614c4-a02a-4224-9ee5-b334451f0671" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698528 
4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df0bed5d-f23c-47e3-8551-7bfb95b14947" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698584 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="df0bed5d-f23c-47e3-8551-7bfb95b14947" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698670 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39291113-86f7-4ddb-a219-f67ba93d35cb" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698725 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="39291113-86f7-4ddb-a219-f67ba93d35cb" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698783 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef583ead-2fa2-4c76-9753-2dc35141fdea" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698843 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef583ead-2fa2-4c76-9753-2dc35141fdea" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.698900 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.698981 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: E0128 13:06:30.699078 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="init" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.699159 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="init" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.699453 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef583ead-2fa2-4c76-9753-2dc35141fdea" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.699549 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="382bff15-0b1d-495c-be83-5f742696eb21" containerName="dnsmasq-dns" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.699626 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="17c614c4-a02a-4224-9ee5-b334451f0671" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.699694 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.702028 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" containerName="mariadb-database-create" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.702162 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="39291113-86f7-4ddb-a219-f67ba93d35cb" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.702263 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.702356 4848 
memory_manager.go:354] "RemoveStaleState removing state" podUID="df0bed5d-f23c-47e3-8551-7bfb95b14947" containerName="mariadb-account-create-update" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.703376 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.707772 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.717752 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-p6z9h-config-mc7ld"] Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.841204 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.841287 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-additional-scripts\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.841319 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run-ovn\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.841596 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-scripts\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.841733 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-log-ovn\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.841799 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpspl\" (UniqueName: \"kubernetes.io/projected/1c73b917-e57a-4532-a6ef-3f68add8f070-kube-api-access-lpspl\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944417 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " 
pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944487 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-additional-scripts\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944518 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run-ovn\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944612 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-scripts\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944693 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-log-ovn\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944741 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpspl\" (UniqueName: \"kubernetes.io/projected/1c73b917-e57a-4532-a6ef-3f68add8f070-kube-api-access-lpspl\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.944879 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.945569 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run-ovn\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.945593 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-additional-scripts\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.945617 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-log-ovn\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " 
pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.947749 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-scripts\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:30 crc kubenswrapper[4848]: I0128 13:06:30.975466 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpspl\" (UniqueName: \"kubernetes.io/projected/1c73b917-e57a-4532-a6ef-3f68add8f070-kube-api-access-lpspl\") pod \"ovn-controller-p6z9h-config-mc7ld\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.027044 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.590428 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-p6z9h-config-mc7ld"] Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.829383 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.864204 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-combined-ca-bundle\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.864300 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-swiftconf\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.864331 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-etc-swift\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.864529 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-ring-data-devices\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.865581 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.867536 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-dispersionconf\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.867694 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz747\" (UniqueName: \"kubernetes.io/projected/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-kube-api-access-xz747\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.867778 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-scripts\") pod \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\" (UID: \"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d\") " Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.869222 4848 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.876468 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.877535 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-kube-api-access-xz747" (OuterVolumeSpecName: "kube-api-access-xz747") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "kube-api-access-xz747". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.879295 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.895633 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.913109 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.915848 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-scripts" (OuterVolumeSpecName: "scripts") pod "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" (UID: "e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.970810 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.970853 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.970868 4848 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.970879 4848 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.970890 4848 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:31 crc kubenswrapper[4848]: I0128 13:06:31.970902 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz747\" (UniqueName: \"kubernetes.io/projected/e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d-kube-api-access-xz747\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.159905 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-cvcts"] Jan 28 13:06:32 crc kubenswrapper[4848]: E0128 13:06:32.161369 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" containerName="swift-ring-rebalance" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.161395 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" containerName="swift-ring-rebalance" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.161771 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d" containerName="swift-ring-rebalance" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.163058 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.175542 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.181799 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cvcts"] Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.278999 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k978l\" (UniqueName: \"kubernetes.io/projected/576845fc-5259-4d19-be49-02ef9575eeb5-kube-api-access-k978l\") pod \"root-account-create-update-cvcts\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") " pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.279173 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/576845fc-5259-4d19-be49-02ef9575eeb5-operator-scripts\") pod \"root-account-create-update-cvcts\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") " pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.380708 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k978l\" (UniqueName: \"kubernetes.io/projected/576845fc-5259-4d19-be49-02ef9575eeb5-kube-api-access-k978l\") pod \"root-account-create-update-cvcts\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") " pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.380799 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/576845fc-5259-4d19-be49-02ef9575eeb5-operator-scripts\") pod \"root-account-create-update-cvcts\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") " pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.381716 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/576845fc-5259-4d19-be49-02ef9575eeb5-operator-scripts\") pod \"root-account-create-update-cvcts\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") " pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.404775 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k978l\" (UniqueName: \"kubernetes.io/projected/576845fc-5259-4d19-be49-02ef9575eeb5-kube-api-access-k978l\") pod \"root-account-create-update-cvcts\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") " pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.439338 4848 generic.go:334] "Generic (PLEG): container finished" podID="1c73b917-e57a-4532-a6ef-3f68add8f070" containerID="1336ef97eb2f49fc570ecd186bea56ae243a27ff5507d7981d6a6805b0007e26" exitCode=0 Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.439418 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h-config-mc7ld" event={"ID":"1c73b917-e57a-4532-a6ef-3f68add8f070","Type":"ContainerDied","Data":"1336ef97eb2f49fc570ecd186bea56ae243a27ff5507d7981d6a6805b0007e26"} Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.439456 4848 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ovn-controller-p6z9h-config-mc7ld" event={"ID":"1c73b917-e57a-4532-a6ef-3f68add8f070","Type":"ContainerStarted","Data":"7680bd1084b12a3776ec14bd8003a3fe6a22017e9cb95fdc7d7a96d9b2fbf12a"} Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.443076 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mgbt4" event={"ID":"e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d","Type":"ContainerDied","Data":"9f313e07f1f5c7c08c85c0200e7b396600c6c41dd77f6ed284867b039e3a16c2"} Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.443142 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f313e07f1f5c7c08c85c0200e7b396600c6c41dd77f6ed284867b039e3a16c2" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.443240 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-mgbt4" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.451825 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.453917 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:32 crc kubenswrapper[4848]: I0128 13:06:32.509971 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cvcts" Jan 28 13:06:33 crc kubenswrapper[4848]: I0128 13:06:33.011393 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cvcts"] Jan 28 13:06:33 crc kubenswrapper[4848]: W0128 13:06:33.018814 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod576845fc_5259_4d19_be49_02ef9575eeb5.slice/crio-ae609ffc9cc9856aef88da2590626e64ad740f6c86c8568bea6a6e1928518122 WatchSource:0}: Error finding container ae609ffc9cc9856aef88da2590626e64ad740f6c86c8568bea6a6e1928518122: Status 404 returned error can't find the container with id ae609ffc9cc9856aef88da2590626e64ad740f6c86c8568bea6a6e1928518122 Jan 28 13:06:33 crc kubenswrapper[4848]: I0128 13:06:33.455140 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cvcts" event={"ID":"576845fc-5259-4d19-be49-02ef9575eeb5","Type":"ContainerStarted","Data":"30434e89244ba3643953051b44a8ba55bf8188aca26b919d6708604913af95b3"} Jan 28 13:06:33 crc kubenswrapper[4848]: I0128 13:06:33.455196 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cvcts" event={"ID":"576845fc-5259-4d19-be49-02ef9575eeb5","Type":"ContainerStarted","Data":"ae609ffc9cc9856aef88da2590626e64ad740f6c86c8568bea6a6e1928518122"} Jan 28 13:06:33 crc kubenswrapper[4848]: I0128 13:06:33.460900 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:33 crc kubenswrapper[4848]: I0128 13:06:33.482061 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-cvcts" podStartSLOduration=1.482032727 podStartE2EDuration="1.482032727s" podCreationTimestamp="2026-01-28 13:06:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:33.477239856 +0000 UTC m=+1220.389456894" watchObservedRunningTime="2026-01-28 
13:06:33.482032727 +0000 UTC m=+1220.394249765" Jan 28 13:06:33 crc kubenswrapper[4848]: I0128 13:06:33.915549 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.011798 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-scripts\") pod \"1c73b917-e57a-4532-a6ef-3f68add8f070\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.011873 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run-ovn\") pod \"1c73b917-e57a-4532-a6ef-3f68add8f070\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.011956 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run\") pod \"1c73b917-e57a-4532-a6ef-3f68add8f070\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012013 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-additional-scripts\") pod \"1c73b917-e57a-4532-a6ef-3f68add8f070\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012101 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "1c73b917-e57a-4532-a6ef-3f68add8f070" (UID: "1c73b917-e57a-4532-a6ef-3f68add8f070"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012149 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run" (OuterVolumeSpecName: "var-run") pod "1c73b917-e57a-4532-a6ef-3f68add8f070" (UID: "1c73b917-e57a-4532-a6ef-3f68add8f070"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012209 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpspl\" (UniqueName: \"kubernetes.io/projected/1c73b917-e57a-4532-a6ef-3f68add8f070-kube-api-access-lpspl\") pod \"1c73b917-e57a-4532-a6ef-3f68add8f070\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012468 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-log-ovn\") pod \"1c73b917-e57a-4532-a6ef-3f68add8f070\" (UID: \"1c73b917-e57a-4532-a6ef-3f68add8f070\") " Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012569 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "1c73b917-e57a-4532-a6ef-3f68add8f070" (UID: "1c73b917-e57a-4532-a6ef-3f68add8f070"). 
InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.012979 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "1c73b917-e57a-4532-a6ef-3f68add8f070" (UID: "1c73b917-e57a-4532-a6ef-3f68add8f070"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.013206 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-scripts" (OuterVolumeSpecName: "scripts") pod "1c73b917-e57a-4532-a6ef-3f68add8f070" (UID: "1c73b917-e57a-4532-a6ef-3f68add8f070"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.013623 4848 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.013644 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.013654 4848 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.013668 4848 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c73b917-e57a-4532-a6ef-3f68add8f070-var-run\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.013682 4848 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1c73b917-e57a-4532-a6ef-3f68add8f070-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.020881 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c73b917-e57a-4532-a6ef-3f68add8f070-kube-api-access-lpspl" (OuterVolumeSpecName: "kube-api-access-lpspl") pod "1c73b917-e57a-4532-a6ef-3f68add8f070" (UID: "1c73b917-e57a-4532-a6ef-3f68add8f070"). InnerVolumeSpecName "kube-api-access-lpspl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.115480 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpspl\" (UniqueName: \"kubernetes.io/projected/1c73b917-e57a-4532-a6ef-3f68add8f070-kube-api-access-lpspl\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.320863 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.327399 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/af520475-92ee-41e6-90e1-7ad3d9609d51-etc-swift\") pod \"swift-storage-0\" (UID: \"af520475-92ee-41e6-90e1-7ad3d9609d51\") " pod="openstack/swift-storage-0" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.464174 4848 generic.go:334] "Generic (PLEG): container finished" podID="576845fc-5259-4d19-be49-02ef9575eeb5" containerID="30434e89244ba3643953051b44a8ba55bf8188aca26b919d6708604913af95b3" exitCode=0 Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.464368 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cvcts" event={"ID":"576845fc-5259-4d19-be49-02ef9575eeb5","Type":"ContainerDied","Data":"30434e89244ba3643953051b44a8ba55bf8188aca26b919d6708604913af95b3"} Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.470266 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-mc7ld" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.470231 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h-config-mc7ld" event={"ID":"1c73b917-e57a-4532-a6ef-3f68add8f070","Type":"ContainerDied","Data":"7680bd1084b12a3776ec14bd8003a3fe6a22017e9cb95fdc7d7a96d9b2fbf12a"} Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.470603 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7680bd1084b12a3776ec14bd8003a3fe6a22017e9cb95fdc7d7a96d9b2fbf12a" Jan 28 13:06:34 crc kubenswrapper[4848]: I0128 13:06:34.485890 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.069595 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-p6z9h-config-mc7ld"] Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.078039 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-p6z9h-config-mc7ld"] Jan 28 13:06:35 crc kubenswrapper[4848]: W0128 13:06:35.190470 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf520475_92ee_41e6_90e1_7ad3d9609d51.slice/crio-1206abda4ebefcdbac9352509cdb8d74d60ab2d350eb065bfd0694ddcd2d39d6 WatchSource:0}: Error finding container 1206abda4ebefcdbac9352509cdb8d74d60ab2d350eb065bfd0694ddcd2d39d6: Status 404 returned error can't find the container with id 1206abda4ebefcdbac9352509cdb8d74d60ab2d350eb065bfd0694ddcd2d39d6 Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.200588 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.248539 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-p6z9h-config-kjlbz"] Jan 28 13:06:35 crc kubenswrapper[4848]: E0128 13:06:35.249386 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c73b917-e57a-4532-a6ef-3f68add8f070" containerName="ovn-config" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.249480 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c73b917-e57a-4532-a6ef-3f68add8f070" containerName="ovn-config" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.249813 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c73b917-e57a-4532-a6ef-3f68add8f070" containerName="ovn-config" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.250653 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.253073 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.280545 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-p6z9h" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.280632 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-p6z9h-config-kjlbz"] Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.342441 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-scripts\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.342520 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-log-ovn\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.342581 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.342698 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csc5n\" (UniqueName: \"kubernetes.io/projected/9812f0b5-344f-43df-ae97-ca78cfbbebab-kube-api-access-csc5n\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.342792 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run-ovn\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.342847 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-additional-scripts\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.444713 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-scripts\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.444772 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-log-ovn\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.444837 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.444881 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csc5n\" (UniqueName: \"kubernetes.io/projected/9812f0b5-344f-43df-ae97-ca78cfbbebab-kube-api-access-csc5n\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.444945 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run-ovn\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.444983 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-additional-scripts\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.445548 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.445881 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-additional-scripts\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.445869 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-log-ovn\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.446187 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run-ovn\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.446943 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-scripts\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.478873 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csc5n\" (UniqueName: \"kubernetes.io/projected/9812f0b5-344f-43df-ae97-ca78cfbbebab-kube-api-access-csc5n\") pod \"ovn-controller-p6z9h-config-kjlbz\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") " pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.487351 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"1206abda4ebefcdbac9352509cdb8d74d60ab2d350eb065bfd0694ddcd2d39d6"} Jan 28 13:06:35 crc kubenswrapper[4848]: I0128 13:06:35.582151 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-kjlbz" Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.351117 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.351480 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="prometheus" containerID="cri-o://db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c" gracePeriod=600 Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.351639 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="thanos-sidecar" containerID="cri-o://930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075" gracePeriod=600 Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.351703 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="config-reloader" containerID="cri-o://99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67" gracePeriod=600 Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.516095 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cvcts" event={"ID":"576845fc-5259-4d19-be49-02ef9575eeb5","Type":"ContainerDied","Data":"ae609ffc9cc9856aef88da2590626e64ad740f6c86c8568bea6a6e1928518122"} Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.516512 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae609ffc9cc9856aef88da2590626e64ad740f6c86c8568bea6a6e1928518122" Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.528797 4848 generic.go:334] "Generic (PLEG): container finished" podID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerID="db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c" exitCode=0 Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.528868 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerDied","Data":"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"} 
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.546535 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cvcts"
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.684343 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/576845fc-5259-4d19-be49-02ef9575eeb5-operator-scripts\") pod \"576845fc-5259-4d19-be49-02ef9575eeb5\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") "
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.684809 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k978l\" (UniqueName: \"kubernetes.io/projected/576845fc-5259-4d19-be49-02ef9575eeb5-kube-api-access-k978l\") pod \"576845fc-5259-4d19-be49-02ef9575eeb5\" (UID: \"576845fc-5259-4d19-be49-02ef9575eeb5\") "
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.685518 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/576845fc-5259-4d19-be49-02ef9575eeb5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "576845fc-5259-4d19-be49-02ef9575eeb5" (UID: "576845fc-5259-4d19-be49-02ef9575eeb5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.689019 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/576845fc-5259-4d19-be49-02ef9575eeb5-kube-api-access-k978l" (OuterVolumeSpecName: "kube-api-access-k978l") pod "576845fc-5259-4d19-be49-02ef9575eeb5" (UID: "576845fc-5259-4d19-be49-02ef9575eeb5"). InnerVolumeSpecName "kube-api-access-k978l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.787559 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/576845fc-5259-4d19-be49-02ef9575eeb5-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.787614 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k978l\" (UniqueName: \"kubernetes.io/projected/576845fc-5259-4d19-be49-02ef9575eeb5-kube-api-access-k978l\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.863730 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c73b917-e57a-4532-a6ef-3f68add8f070" path="/var/lib/kubelet/pods/1c73b917-e57a-4532-a6ef-3f68add8f070/volumes"
Jan 28 13:06:36 crc kubenswrapper[4848]: I0128 13:06:36.925266 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-p6z9h-config-kjlbz"]
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.207601 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302312 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-2\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302443 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-tls-assets\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302494 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-1\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302556 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-thanos-prometheus-http-client-file\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302601 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/481d4e0f-f65b-466a-8e9d-f6761e78479f-config-out\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302649 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r6lc\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-kube-api-access-9r6lc\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302702 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-0\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302804 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-config\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.302871 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-web-config\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.303102 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"481d4e0f-f65b-466a-8e9d-f6761e78479f\" (UID: \"481d4e0f-f65b-466a-8e9d-f6761e78479f\") "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.308625 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.309013 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.311129 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.311551 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.320591 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-kube-api-access-9r6lc" (OuterVolumeSpecName: "kube-api-access-9r6lc") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "kube-api-access-9r6lc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.330236 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/481d4e0f-f65b-466a-8e9d-f6761e78479f-config-out" (OuterVolumeSpecName: "config-out") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.341226 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.341428 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-config" (OuterVolumeSpecName: "config") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410092 4848 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410133 4848 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/481d4e0f-f65b-466a-8e9d-f6761e78479f-config-out\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410145 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r6lc\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-kube-api-access-9r6lc\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410157 4848 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410169 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410180 4848 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410191 4848 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/481d4e0f-f65b-466a-8e9d-f6761e78479f-tls-assets\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.410203 4848 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/481d4e0f-f65b-466a-8e9d-f6761e78479f-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.427731 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.441884 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-web-config" (OuterVolumeSpecName: "web-config") pod "481d4e0f-f65b-466a-8e9d-f6761e78479f" (UID: "481d4e0f-f65b-466a-8e9d-f6761e78479f"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.511879 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") on node \"crc\" "
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.512505 4848 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/481d4e0f-f65b-466a-8e9d-f6761e78479f-web-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.541114 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h-config-kjlbz" event={"ID":"9812f0b5-344f-43df-ae97-ca78cfbbebab","Type":"ContainerStarted","Data":"d6f03585028a0d38dbfeb19f1d431dfb80d5c160cac8ed458622fc747cb80fbd"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.542520 4848 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.545624 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"fd8129d016544b7a85ab8fead85fa6ba9aa86c0b202951fb975235ef8d1969c6"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.545675 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"3c0c2923e2b962707c06456f14d2556577931b8e0da1bbecfc8130d02e45e0b9"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.545687 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"7ace46340e16a89b852a831b26fefc996c7538f2038e343c9667f08a87eb5bbe"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.545699 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"a2da6d797805ef2a49f5b053938615ea98f134079f1db6145ed97c740465d0b6"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.550329 4848 generic.go:334] "Generic (PLEG): container finished" podID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerID="930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075" exitCode=0
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.550377 4848 generic.go:334] "Generic (PLEG): container finished" podID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerID="99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67" exitCode=0
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.550453 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cvcts"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.551075 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerDied","Data":"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.551157 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.551178 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerDied","Data":"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.551197 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"481d4e0f-f65b-466a-8e9d-f6761e78479f","Type":"ContainerDied","Data":"3926ecd805d8e437cc5991528dbdf2046d9498450b6ff199bab4ba65d8f0dd57"}
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.551230 4848 scope.go:117] "RemoveContainer" containerID="930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.596300 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.606259 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.635464 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.635885 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="init-config-reloader"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.635907 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="init-config-reloader"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.635923 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="config-reloader"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.635931 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="config-reloader"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.635942 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="prometheus"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.635949 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="prometheus"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.635959 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="thanos-sidecar"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.635965 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="thanos-sidecar"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.635982 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="576845fc-5259-4d19-be49-02ef9575eeb5" containerName="mariadb-account-create-update"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.635989 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="576845fc-5259-4d19-be49-02ef9575eeb5" containerName="mariadb-account-create-update"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.636184 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="576845fc-5259-4d19-be49-02ef9575eeb5" containerName="mariadb-account-create-update"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.636200 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="config-reloader"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.636217 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="thanos-sidecar"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.636261 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" containerName="prometheus"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.637940 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.641810 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642053 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642133 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642170 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642238 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-sll79"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642300 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642315 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.642307 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.652071 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.666063 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.730587 4848 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83") on node "crc"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.733293 4848 scope.go:117] "RemoveContainer" containerID="99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.767319 4848 scope.go:117] "RemoveContainer" containerID="db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.796336 4848 scope.go:117] "RemoveContainer" containerID="822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.822109 4848 scope.go:117] "RemoveContainer" containerID="930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.823200 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075\": container with ID starting with 930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075 not found: ID does not exist" containerID="930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.823273 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"} err="failed to get container status \"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075\": rpc error: code = NotFound desc = could not find container \"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075\": container with ID starting with 930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075 not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.823312 4848 scope.go:117] "RemoveContainer" containerID="99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.823921 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67\": container with ID starting with 99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67 not found: ID does not exist" containerID="99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.823943 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"} err="failed to get container status \"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67\": rpc error: code = NotFound desc = could not find container \"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67\": container with ID starting with 99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67 not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.823956 4848 scope.go:117] "RemoveContainer" containerID="db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.825409 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c\": container with ID starting with db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c not found: ID does not exist" containerID="db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.825441 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"} err="failed to get container status \"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c\": rpc error: code = NotFound desc = could not find container \"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c\": container with ID starting with db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.825457 4848 scope.go:117] "RemoveContainer" containerID="822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"
Jan 28 13:06:37 crc kubenswrapper[4848]: E0128 13:06:37.826411 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073\": container with ID starting with 822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073 not found: ID does not exist" containerID="822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.826491 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"} err="failed to get container status \"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073\": rpc error: code = NotFound desc = could not find container \"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073\": container with ID starting with 822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073 not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.826536 4848 scope.go:117] "RemoveContainer" containerID="930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.829643 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075"} err="failed to get container status \"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075\": rpc error: code = NotFound desc = could not find container \"930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075\": container with ID starting with 930fd954e1d6ac0119f8e4f846f16e99bfe9c839e44af188ea1766d770058075 not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.829681 4848 scope.go:117] "RemoveContainer" containerID="99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830111 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830197 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/0798460c-39c3-4539-8b8a-89a551b4bafc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830241 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830266 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67"} err="failed to get container status \"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67\": rpc error: code = NotFound desc = could not find container \"99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67\": container with ID starting with 99c3b2b047d91f24a3c29cfcb0e2cf45dd4a86286aa9e9a4e6cbf6bb8e47cd67 not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830285 4848 scope.go:117] "RemoveContainer" containerID="db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830302 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830432 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830458 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsl22\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-kube-api-access-bsl22\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830584 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830620 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c"} err="failed to get container status \"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c\": rpc error: code = NotFound desc = could not find container \"db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c\": container with ID starting with db8e7cf43e8a8799ef333b0f09468e3a1d3e87020798b760b286569e042f435c not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830675 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-config\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830726 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830821 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830908 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.831139 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.831186 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.830677 4848 scope.go:117] "RemoveContainer" containerID="822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.833015 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073"} err="failed to get container status \"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073\": rpc error: code = NotFound desc = could not find container \"822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073\": container with ID starting with 822c760f9712789d100876d46f39eff7cb7c4737c54304fe5105c3cf9700a073 not found: ID does not exist"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.834026 4848 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.834061 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fbb6ff8d2ceb994243fa9499d5bbb9ac1ad8a88e4c49c99f41a1170dfb512188/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.884830 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.924284 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.924373 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933020 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933094 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933136 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933175 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/0798460c-39c3-4539-8b8a-89a551b4bafc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933199 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933223 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933276 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933296 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsl22\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-kube-api-access-bsl22\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933329 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933363 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-config\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933387 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.933423 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.934348 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.935893 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.936389 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.940621 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.941947 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-config\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.942090 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/0798460c-39c3-4539-8b8a-89a551b4bafc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.942877 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.943127 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.943578 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.946200 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.947095 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:37 crc kubenswrapper[4848]: I0128 13:06:37.962969 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsl22\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-kube-api-access-bsl22\") pod \"prometheus-metric-storage-0\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:38 crc kubenswrapper[4848]: I0128 13:06:38.018477 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 13:06:38 crc kubenswrapper[4848]: I0128 13:06:38.419413 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 13:06:38 crc kubenswrapper[4848]: I0128 13:06:38.563821 4848 generic.go:334] "Generic (PLEG): container finished" podID="9812f0b5-344f-43df-ae97-ca78cfbbebab" containerID="437d506ab6d6d84fb0f608a850f732026426545988daf1c868b4ab308e2d895f" exitCode=0
Jan 28 13:06:38 crc kubenswrapper[4848]: I0128 13:06:38.563874 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h-config-kjlbz" event={"ID":"9812f0b5-344f-43df-ae97-ca78cfbbebab","Type":"ContainerDied","Data":"437d506ab6d6d84fb0f608a850f732026426545988daf1c868b4ab308e2d895f"}
Jan 28 13:06:38 crc kubenswrapper[4848]: W0128 13:06:38.675555 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0798460c_39c3_4539_8b8a_89a551b4bafc.slice/crio-d9b9513a93187d2dcb85a47e371f990b79c3002a45916394d2ca19b606b70414 WatchSource:0}: Error finding container d9b9513a93187d2dcb85a47e371f990b79c3002a45916394d2ca19b606b70414: Status 404 returned error can't find the container with id d9b9513a93187d2dcb85a47e371f990b79c3002a45916394d2ca19b606b70414
Jan 28 13:06:38 crc kubenswrapper[4848]: I0128 13:06:38.865989 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="481d4e0f-f65b-466a-8e9d-f6761e78479f" path="/var/lib/kubelet/pods/481d4e0f-f65b-466a-8e9d-f6761e78479f/volumes"
Jan 28 13:06:39 crc kubenswrapper[4848]: I0128 13:06:39.576004 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerStarted","Data":"d9b9513a93187d2dcb85a47e371f990b79c3002a45916394d2ca19b606b70414"}
Jan 28 13:06:39 crc kubenswrapper[4848]: I0128 13:06:39.581149 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"e9b98c5155714133e0bb9153d6d064fbde8c0f94efde7f4e7620584ae039c877"}
Jan 28 13:06:39 crc kubenswrapper[4848]: I0128 13:06:39.581217 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"b1d6b37bb239cbc9b7117ecd3d6b5498f0a678bae3cb77085724b1d09d87da53"}
Jan 28 13:06:39 crc kubenswrapper[4848]: I0128 13:06:39.581228 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"a92fc9a97817a279d1ccd5b5ed12bb3c4dee0c22b2ddc17efd79261fb3568462"}
Jan 28 13:06:39 crc kubenswrapper[4848]: I0128 13:06:39.949067 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-kjlbz"
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075365 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csc5n\" (UniqueName: \"kubernetes.io/projected/9812f0b5-344f-43df-ae97-ca78cfbbebab-kube-api-access-csc5n\") pod \"9812f0b5-344f-43df-ae97-ca78cfbbebab\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") "
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075458 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-scripts\") pod \"9812f0b5-344f-43df-ae97-ca78cfbbebab\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") "
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075523 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-log-ovn\") pod \"9812f0b5-344f-43df-ae97-ca78cfbbebab\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") "
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075595 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run\") pod \"9812f0b5-344f-43df-ae97-ca78cfbbebab\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") "
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075645 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run-ovn\") pod \"9812f0b5-344f-43df-ae97-ca78cfbbebab\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") "
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075676 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-additional-scripts\") pod \"9812f0b5-344f-43df-ae97-ca78cfbbebab\" (UID: \"9812f0b5-344f-43df-ae97-ca78cfbbebab\") "
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075726 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run" (OuterVolumeSpecName: "var-run") pod "9812f0b5-344f-43df-ae97-ca78cfbbebab" (UID: "9812f0b5-344f-43df-ae97-ca78cfbbebab"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075803 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "9812f0b5-344f-43df-ae97-ca78cfbbebab" (UID: "9812f0b5-344f-43df-ae97-ca78cfbbebab"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.075827 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "9812f0b5-344f-43df-ae97-ca78cfbbebab" (UID: "9812f0b5-344f-43df-ae97-ca78cfbbebab"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.076313 4848 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.076334 4848 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.076347 4848 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9812f0b5-344f-43df-ae97-ca78cfbbebab-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.076756 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "9812f0b5-344f-43df-ae97-ca78cfbbebab" (UID: "9812f0b5-344f-43df-ae97-ca78cfbbebab"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.076978 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-scripts" (OuterVolumeSpecName: "scripts") pod "9812f0b5-344f-43df-ae97-ca78cfbbebab" (UID: "9812f0b5-344f-43df-ae97-ca78cfbbebab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.085457 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9812f0b5-344f-43df-ae97-ca78cfbbebab-kube-api-access-csc5n" (OuterVolumeSpecName: "kube-api-access-csc5n") pod "9812f0b5-344f-43df-ae97-ca78cfbbebab" (UID: "9812f0b5-344f-43df-ae97-ca78cfbbebab"). InnerVolumeSpecName "kube-api-access-csc5n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.179696 4848 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.180155 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csc5n\" (UniqueName: \"kubernetes.io/projected/9812f0b5-344f-43df-ae97-ca78cfbbebab-kube-api-access-csc5n\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.180171 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9812f0b5-344f-43df-ae97-ca78cfbbebab-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.591679 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-p6z9h-config-kjlbz" event={"ID":"9812f0b5-344f-43df-ae97-ca78cfbbebab","Type":"ContainerDied","Data":"d6f03585028a0d38dbfeb19f1d431dfb80d5c160cac8ed458622fc747cb80fbd"}
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.591752 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6f03585028a0d38dbfeb19f1d431dfb80d5c160cac8ed458622fc747cb80fbd"
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.591747 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-p6z9h-config-kjlbz"
Jan 28 13:06:40 crc kubenswrapper[4848]: I0128 13:06:40.596579 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"fad1375dc3462735bfd5b87edd33598e67fca868ab7e00707b4cfd188416e463"}
Jan 28 13:06:41 crc kubenswrapper[4848]: I0128 13:06:41.047530 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-p6z9h-config-kjlbz"]
Jan 28 13:06:41 crc kubenswrapper[4848]: I0128 13:06:41.058119 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-p6z9h-config-kjlbz"]
Jan 28 13:06:41 crc kubenswrapper[4848]: I0128 13:06:41.609606 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"60f7b3b9987ff93f8891b69c02fa3b91c3885f95993ece50581a6705ff57c928"}
Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.626843 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"b105300bddb96917c3a7ebb4ed37a3d1f9766af7abbcb5e164a32d1a8661c9dc"}
Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.627532 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"3707e36751ae91951ba3093d402ecef0331b03515b81678fa9ff295fa60b7d77"}
Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.627547 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"8eb09726174e895a3ba0f56492bb1400692e862aa36f369713c59b8d1c53c345"}
Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.627559 4848 kubelet.go:2453] "SyncLoop (PLEG): event
for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"e538e3b80b0794cb4eefe94aa57b9082c9ad71d567e27f1f94e0b7f51590cf64"} Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.627569 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"6dc54c70c689940c3a3029c93eb8598d738346b3c5296725ff8bcbefd545d81c"} Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.629274 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerStarted","Data":"e8b6318e2bb12a46630e1293e6b7168a2e5085fed5a358ea3adb1c24072bf940"} Jan 28 13:06:42 crc kubenswrapper[4848]: I0128 13:06:42.863708 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9812f0b5-344f-43df-ae97-ca78cfbbebab" path="/var/lib/kubelet/pods/9812f0b5-344f-43df-ae97-ca78cfbbebab/volumes" Jan 28 13:06:43 crc kubenswrapper[4848]: I0128 13:06:43.646554 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"af520475-92ee-41e6-90e1-7ad3d9609d51","Type":"ContainerStarted","Data":"a13119adbe29eccf877f3573742423b1806c00ba34b5a41b2c5e2a1c04ecae4f"} Jan 28 13:06:43 crc kubenswrapper[4848]: I0128 13:06:43.689528 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.512198819 podStartE2EDuration="42.689488727s" podCreationTimestamp="2026-01-28 13:06:01 +0000 UTC" firstStartedPulling="2026-01-28 13:06:35.200425676 +0000 UTC m=+1222.112642714" lastFinishedPulling="2026-01-28 13:06:40.377715564 +0000 UTC m=+1227.289932622" observedRunningTime="2026-01-28 13:06:43.683567884 +0000 UTC m=+1230.595784952" watchObservedRunningTime="2026-01-28 13:06:43.689488727 +0000 UTC m=+1230.601705785" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.022748 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b5fd78997-2tm5l"] Jan 28 13:06:44 crc kubenswrapper[4848]: E0128 13:06:44.023869 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9812f0b5-344f-43df-ae97-ca78cfbbebab" containerName="ovn-config" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.023956 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9812f0b5-344f-43df-ae97-ca78cfbbebab" containerName="ovn-config" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.024204 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9812f0b5-344f-43df-ae97-ca78cfbbebab" containerName="ovn-config" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.025436 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.029257 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.047456 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b5fd78997-2tm5l"] Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.166657 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-svc\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.166990 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqslv\" (UniqueName: \"kubernetes.io/projected/cf32a5d4-51d4-45a4-973f-c47bee280747-kube-api-access-tqslv\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.167127 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-config\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.167283 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.167373 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.167420 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-swift-storage-0\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.269032 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqslv\" (UniqueName: \"kubernetes.io/projected/cf32a5d4-51d4-45a4-973f-c47bee280747-kube-api-access-tqslv\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.269113 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-config\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: 
\"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.269148 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.269177 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.269199 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-swift-storage-0\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.269233 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-svc\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.270306 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.270340 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-svc\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.270503 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.270731 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-config\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.270830 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-swift-storage-0\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc 
kubenswrapper[4848]: I0128 13:06:44.295683 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqslv\" (UniqueName: \"kubernetes.io/projected/cf32a5d4-51d4-45a4-973f-c47bee280747-kube-api-access-tqslv\") pod \"dnsmasq-dns-7b5fd78997-2tm5l\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.348716 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:44 crc kubenswrapper[4848]: I0128 13:06:44.901384 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b5fd78997-2tm5l"] Jan 28 13:06:45 crc kubenswrapper[4848]: I0128 13:06:45.354860 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Jan 28 13:06:45 crc kubenswrapper[4848]: I0128 13:06:45.670406 4848 generic.go:334] "Generic (PLEG): container finished" podID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerID="fd7f250bd216ccffed2c106868466a0ceaa9641fd6bb4dfe71e7f3e834f04b33" exitCode=0 Jan 28 13:06:45 crc kubenswrapper[4848]: I0128 13:06:45.670474 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" event={"ID":"cf32a5d4-51d4-45a4-973f-c47bee280747","Type":"ContainerDied","Data":"fd7f250bd216ccffed2c106868466a0ceaa9641fd6bb4dfe71e7f3e834f04b33"} Jan 28 13:06:45 crc kubenswrapper[4848]: I0128 13:06:45.670513 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" event={"ID":"cf32a5d4-51d4-45a4-973f-c47bee280747","Type":"ContainerStarted","Data":"e950c07b94ee14bd3834d01f471154cb6b6aa875de3e1bd814ac01e6ab3afb8a"} Jan 28 13:06:46 crc kubenswrapper[4848]: I0128 13:06:46.040688 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.107:5671: connect: connection refused" Jan 28 13:06:46 crc kubenswrapper[4848]: I0128 13:06:46.168318 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="ff062566-cfd3-4393-b794-695d3473ef1a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.108:5671: connect: connection refused" Jan 28 13:06:46 crc kubenswrapper[4848]: I0128 13:06:46.681275 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" event={"ID":"cf32a5d4-51d4-45a4-973f-c47bee280747","Type":"ContainerStarted","Data":"74665a5e907557d7da26720b8da3e6482ef23c04552e5a52e296ffd9fc5b505c"} Jan 28 13:06:46 crc kubenswrapper[4848]: I0128 13:06:46.681544 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:46 crc kubenswrapper[4848]: I0128 13:06:46.711366 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" podStartSLOduration=3.711339695 podStartE2EDuration="3.711339695s" podCreationTimestamp="2026-01-28 13:06:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:46.702168501 +0000 UTC m=+1233.614385539" 
watchObservedRunningTime="2026-01-28 13:06:46.711339695 +0000 UTC m=+1233.623556733" Jan 28 13:06:48 crc kubenswrapper[4848]: I0128 13:06:48.699813 4848 generic.go:334] "Generic (PLEG): container finished" podID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerID="e8b6318e2bb12a46630e1293e6b7168a2e5085fed5a358ea3adb1c24072bf940" exitCode=0 Jan 28 13:06:48 crc kubenswrapper[4848]: I0128 13:06:48.699905 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerDied","Data":"e8b6318e2bb12a46630e1293e6b7168a2e5085fed5a358ea3adb1c24072bf940"} Jan 28 13:06:49 crc kubenswrapper[4848]: I0128 13:06:49.715573 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerStarted","Data":"5ebaefa9171c38760ffd414457b36921e7c0c08f6b6cd8116c58d9ebf4f8ec4c"} Jan 28 13:06:52 crc kubenswrapper[4848]: I0128 13:06:52.756620 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerStarted","Data":"da9cc43340008dfc66d1c1f759a26601dcc6f05b2ae126964c3e43fa00ff3e17"} Jan 28 13:06:52 crc kubenswrapper[4848]: I0128 13:06:52.757551 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerStarted","Data":"6cd42bd9a1d13179e079b31f531dcf9df745313d43f018372add67ea22687774"} Jan 28 13:06:52 crc kubenswrapper[4848]: I0128 13:06:52.800488 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=15.800461883 podStartE2EDuration="15.800461883s" podCreationTimestamp="2026-01-28 13:06:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:06:52.791021793 +0000 UTC m=+1239.703238841" watchObservedRunningTime="2026-01-28 13:06:52.800461883 +0000 UTC m=+1239.712678921" Jan 28 13:06:53 crc kubenswrapper[4848]: I0128 13:06:53.019382 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:53 crc kubenswrapper[4848]: I0128 13:06:53.019504 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:53 crc kubenswrapper[4848]: I0128 13:06:53.031920 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:53 crc kubenswrapper[4848]: I0128 13:06:53.768793 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 13:06:54 crc kubenswrapper[4848]: I0128 13:06:54.355234 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:06:54 crc kubenswrapper[4848]: I0128 13:06:54.426316 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5896b68bd7-jwsfl"] Jan 28 13:06:54 crc kubenswrapper[4848]: I0128 13:06:54.426651 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerName="dnsmasq-dns" 
containerID="cri-o://3ae884951468079c34596a7284856a73c23a9d2189e103d63ee24e42de51e857" gracePeriod=10 Jan 28 13:06:54 crc kubenswrapper[4848]: I0128 13:06:54.778354 4848 generic.go:334] "Generic (PLEG): container finished" podID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerID="3ae884951468079c34596a7284856a73c23a9d2189e103d63ee24e42de51e857" exitCode=0 Jan 28 13:06:54 crc kubenswrapper[4848]: I0128 13:06:54.778398 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" event={"ID":"ed2e9f9f-591c-46cf-86b1-3c530be18542","Type":"ContainerDied","Data":"3ae884951468079c34596a7284856a73c23a9d2189e103d63ee24e42de51e857"} Jan 28 13:06:54 crc kubenswrapper[4848]: I0128 13:06:54.939131 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.082109 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-sb\") pod \"ed2e9f9f-591c-46cf-86b1-3c530be18542\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.082169 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-nb\") pod \"ed2e9f9f-591c-46cf-86b1-3c530be18542\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.082224 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-config\") pod \"ed2e9f9f-591c-46cf-86b1-3c530be18542\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.082294 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-dns-svc\") pod \"ed2e9f9f-591c-46cf-86b1-3c530be18542\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.082365 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxqb5\" (UniqueName: \"kubernetes.io/projected/ed2e9f9f-591c-46cf-86b1-3c530be18542-kube-api-access-nxqb5\") pod \"ed2e9f9f-591c-46cf-86b1-3c530be18542\" (UID: \"ed2e9f9f-591c-46cf-86b1-3c530be18542\") " Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.089877 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed2e9f9f-591c-46cf-86b1-3c530be18542-kube-api-access-nxqb5" (OuterVolumeSpecName: "kube-api-access-nxqb5") pod "ed2e9f9f-591c-46cf-86b1-3c530be18542" (UID: "ed2e9f9f-591c-46cf-86b1-3c530be18542"). InnerVolumeSpecName "kube-api-access-nxqb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.126553 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ed2e9f9f-591c-46cf-86b1-3c530be18542" (UID: "ed2e9f9f-591c-46cf-86b1-3c530be18542"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.127448 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ed2e9f9f-591c-46cf-86b1-3c530be18542" (UID: "ed2e9f9f-591c-46cf-86b1-3c530be18542"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.130142 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ed2e9f9f-591c-46cf-86b1-3c530be18542" (UID: "ed2e9f9f-591c-46cf-86b1-3c530be18542"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.137763 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-config" (OuterVolumeSpecName: "config") pod "ed2e9f9f-591c-46cf-86b1-3c530be18542" (UID: "ed2e9f9f-591c-46cf-86b1-3c530be18542"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.185592 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxqb5\" (UniqueName: \"kubernetes.io/projected/ed2e9f9f-591c-46cf-86b1-3c530be18542-kube-api-access-nxqb5\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.185690 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.185706 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.185741 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.185755 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed2e9f9f-591c-46cf-86b1-3c530be18542-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.356486 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.791057 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.791084 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5896b68bd7-jwsfl" event={"ID":"ed2e9f9f-591c-46cf-86b1-3c530be18542","Type":"ContainerDied","Data":"ee0f8ce6d295bf13500e24f9c603da3f3a4b3b82e2fe609b5ab60ff8e5e573f1"} Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.792580 4848 scope.go:117] "RemoveContainer" containerID="3ae884951468079c34596a7284856a73c23a9d2189e103d63ee24e42de51e857" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.825519 4848 scope.go:117] "RemoveContainer" containerID="f7ac5ba402f76312cd61ef2a1bc9c8df988c72996840e00d4695884becc0cf57" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.830335 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-k59cb"] Jan 28 13:06:55 crc kubenswrapper[4848]: E0128 13:06:55.830929 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerName="init" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.830950 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerName="init" Jan 28 13:06:55 crc kubenswrapper[4848]: E0128 13:06:55.830962 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerName="dnsmasq-dns" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.830970 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerName="dnsmasq-dns" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.831145 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" containerName="dnsmasq-dns" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.831976 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.850349 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-k59cb"] Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.870008 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5896b68bd7-jwsfl"] Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.884318 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5896b68bd7-jwsfl"] Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.921898 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-gk6gn"] Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.923100 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.938709 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-6a7f-account-create-update-t87b7"] Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.940395 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.944094 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.949403 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-gk6gn"] Jan 28 13:06:55 crc kubenswrapper[4848]: I0128 13:06:55.959238 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6a7f-account-create-update-t87b7"] Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.000618 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlr54\" (UniqueName: \"kubernetes.io/projected/f18cf42c-0012-44b5-8fc3-697ff0dc8099-kube-api-access-tlr54\") pod \"cinder-db-create-k59cb\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.000721 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f18cf42c-0012-44b5-8fc3-697ff0dc8099-operator-scripts\") pod \"cinder-db-create-k59cb\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.041480 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.041545 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-f8f8-account-create-update-58wd2"] Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.043166 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.061302 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.075812 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f8f8-account-create-update-58wd2"] Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.104427 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b5cee1-1fa4-493c-8888-92e58d63e28e-operator-scripts\") pod \"barbican-6a7f-account-create-update-t87b7\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.104518 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f18cf42c-0012-44b5-8fc3-697ff0dc8099-operator-scripts\") pod \"cinder-db-create-k59cb\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.104565 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfckv\" (UniqueName: \"kubernetes.io/projected/1c572357-66c7-4bf0-b000-4881dca67248-kube-api-access-sfckv\") pod \"barbican-db-create-gk6gn\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.104700 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqrnw\" (UniqueName: \"kubernetes.io/projected/12b5cee1-1fa4-493c-8888-92e58d63e28e-kube-api-access-nqrnw\") pod \"barbican-6a7f-account-create-update-t87b7\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.104829 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlr54\" (UniqueName: \"kubernetes.io/projected/f18cf42c-0012-44b5-8fc3-697ff0dc8099-kube-api-access-tlr54\") pod \"cinder-db-create-k59cb\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.104920 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c572357-66c7-4bf0-b000-4881dca67248-operator-scripts\") pod \"barbican-db-create-gk6gn\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.105699 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f18cf42c-0012-44b5-8fc3-697ff0dc8099-operator-scripts\") pod \"cinder-db-create-k59cb\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.145418 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlr54\" (UniqueName: \"kubernetes.io/projected/f18cf42c-0012-44b5-8fc3-697ff0dc8099-kube-api-access-tlr54\") pod 
\"cinder-db-create-k59cb\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.177494 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.206689 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqrnw\" (UniqueName: \"kubernetes.io/projected/12b5cee1-1fa4-493c-8888-92e58d63e28e-kube-api-access-nqrnw\") pod \"barbican-6a7f-account-create-update-t87b7\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.206767 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87c4cea-2693-4000-8635-1fcc694ead7c-operator-scripts\") pod \"cinder-f8f8-account-create-update-58wd2\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.206836 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjkvb\" (UniqueName: \"kubernetes.io/projected/c87c4cea-2693-4000-8635-1fcc694ead7c-kube-api-access-kjkvb\") pod \"cinder-f8f8-account-create-update-58wd2\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.206872 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c572357-66c7-4bf0-b000-4881dca67248-operator-scripts\") pod \"barbican-db-create-gk6gn\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.206936 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b5cee1-1fa4-493c-8888-92e58d63e28e-operator-scripts\") pod \"barbican-6a7f-account-create-update-t87b7\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.206986 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfckv\" (UniqueName: \"kubernetes.io/projected/1c572357-66c7-4bf0-b000-4881dca67248-kube-api-access-sfckv\") pod \"barbican-db-create-gk6gn\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.208228 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b5cee1-1fa4-493c-8888-92e58d63e28e-operator-scripts\") pod \"barbican-6a7f-account-create-update-t87b7\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.208822 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c572357-66c7-4bf0-b000-4881dca67248-operator-scripts\") pod \"barbican-db-create-gk6gn\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " 
pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.211658 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-5pkv8"] Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.212967 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.217972 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.218029 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.225161 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ffggt" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.225535 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.231277 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k59cb" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.231802 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-5pkv8"] Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.246213 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqrnw\" (UniqueName: \"kubernetes.io/projected/12b5cee1-1fa4-493c-8888-92e58d63e28e-kube-api-access-nqrnw\") pod \"barbican-6a7f-account-create-update-t87b7\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.253185 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfckv\" (UniqueName: \"kubernetes.io/projected/1c572357-66c7-4bf0-b000-4881dca67248-kube-api-access-sfckv\") pod \"barbican-db-create-gk6gn\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.271625 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.310514 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjkvb\" (UniqueName: \"kubernetes.io/projected/c87c4cea-2693-4000-8635-1fcc694ead7c-kube-api-access-kjkvb\") pod \"cinder-f8f8-account-create-update-58wd2\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.310596 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-config-data\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.310701 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-combined-ca-bundle\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.310860 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87c4cea-2693-4000-8635-1fcc694ead7c-operator-scripts\") pod \"cinder-f8f8-account-create-update-58wd2\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.310887 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-765sd\" (UniqueName: \"kubernetes.io/projected/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-kube-api-access-765sd\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.312982 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87c4cea-2693-4000-8635-1fcc694ead7c-operator-scripts\") pod \"cinder-f8f8-account-create-update-58wd2\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.336054 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjkvb\" (UniqueName: \"kubernetes.io/projected/c87c4cea-2693-4000-8635-1fcc694ead7c-kube-api-access-kjkvb\") pod \"cinder-f8f8-account-create-update-58wd2\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.376629 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.421013 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-765sd\" (UniqueName: \"kubernetes.io/projected/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-kube-api-access-765sd\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.421197 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-config-data\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.424039 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-combined-ca-bundle\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.429501 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-combined-ca-bundle\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.431846 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-config-data\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.442102 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-765sd\" (UniqueName: \"kubernetes.io/projected/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-kube-api-access-765sd\") pod \"keystone-db-sync-5pkv8\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.553958 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gk6gn" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.656901 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.863380 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed2e9f9f-591c-46cf-86b1-3c530be18542" path="/var/lib/kubelet/pods/ed2e9f9f-591c-46cf-86b1-3c530be18542/volumes" Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.931698 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6a7f-account-create-update-t87b7"] Jan 28 13:06:56 crc kubenswrapper[4848]: W0128 13:06:56.949794 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf18cf42c_0012_44b5_8fc3_697ff0dc8099.slice/crio-1a9e8f0599e0e6b5cdfa5f508313404130e0737bddc8f2b84c8f0170027179d0 WatchSource:0}: Error finding container 1a9e8f0599e0e6b5cdfa5f508313404130e0737bddc8f2b84c8f0170027179d0: Status 404 returned error can't find the container with id 1a9e8f0599e0e6b5cdfa5f508313404130e0737bddc8f2b84c8f0170027179d0 Jan 28 13:06:56 crc kubenswrapper[4848]: I0128 13:06:56.951096 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-k59cb"] Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.059235 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f8f8-account-create-update-58wd2"] Jan 28 13:06:57 crc kubenswrapper[4848]: W0128 13:06:57.062179 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc87c4cea_2693_4000_8635_1fcc694ead7c.slice/crio-eab71d21db0a82d80c135987699fc1e9c5b09b28bdba8f3f6e13df113b4ad866 WatchSource:0}: Error finding container eab71d21db0a82d80c135987699fc1e9c5b09b28bdba8f3f6e13df113b4ad866: Status 404 returned error can't find the container with id eab71d21db0a82d80c135987699fc1e9c5b09b28bdba8f3f6e13df113b4ad866 Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.243557 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-gk6gn"] Jan 28 13:06:57 crc kubenswrapper[4848]: W0128 13:06:57.266671 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c572357_66c7_4bf0_b000_4881dca67248.slice/crio-83c86f4a898f8e19e4a2f04adc88091954171eee0e51648dc9b5b165215e498d WatchSource:0}: Error finding container 83c86f4a898f8e19e4a2f04adc88091954171eee0e51648dc9b5b165215e498d: Status 404 returned error can't find the container with id 83c86f4a898f8e19e4a2f04adc88091954171eee0e51648dc9b5b165215e498d Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.360235 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-5pkv8"] Jan 28 13:06:57 crc kubenswrapper[4848]: W0128 13:06:57.371381 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc9233f9_ae7a_46f8_bec3_97aa6db5e525.slice/crio-a366db868b563900e0be7c2108bcc13fc30a9b7999461bd197b6fbe27b0766c9 WatchSource:0}: Error finding container a366db868b563900e0be7c2108bcc13fc30a9b7999461bd197b6fbe27b0766c9: Status 404 returned error can't find the container with id a366db868b563900e0be7c2108bcc13fc30a9b7999461bd197b6fbe27b0766c9 Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.841881 4848 generic.go:334] "Generic (PLEG): container finished" podID="c87c4cea-2693-4000-8635-1fcc694ead7c" containerID="4e34736f998da61a923cfa47f535810b95340121c3a3127e7721dc875b77a9fa" 
exitCode=0 Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.841972 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f8f8-account-create-update-58wd2" event={"ID":"c87c4cea-2693-4000-8635-1fcc694ead7c","Type":"ContainerDied","Data":"4e34736f998da61a923cfa47f535810b95340121c3a3127e7721dc875b77a9fa"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.842006 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f8f8-account-create-update-58wd2" event={"ID":"c87c4cea-2693-4000-8635-1fcc694ead7c","Type":"ContainerStarted","Data":"eab71d21db0a82d80c135987699fc1e9c5b09b28bdba8f3f6e13df113b4ad866"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.843441 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5pkv8" event={"ID":"dc9233f9-ae7a-46f8-bec3-97aa6db5e525","Type":"ContainerStarted","Data":"a366db868b563900e0be7c2108bcc13fc30a9b7999461bd197b6fbe27b0766c9"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.844623 4848 generic.go:334] "Generic (PLEG): container finished" podID="f18cf42c-0012-44b5-8fc3-697ff0dc8099" containerID="04e9d70df11c0d2df711aadbff7a1fd5738fee22f9e4f57db8c3280ff13b8ab7" exitCode=0 Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.844686 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-k59cb" event={"ID":"f18cf42c-0012-44b5-8fc3-697ff0dc8099","Type":"ContainerDied","Data":"04e9d70df11c0d2df711aadbff7a1fd5738fee22f9e4f57db8c3280ff13b8ab7"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.844720 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-k59cb" event={"ID":"f18cf42c-0012-44b5-8fc3-697ff0dc8099","Type":"ContainerStarted","Data":"1a9e8f0599e0e6b5cdfa5f508313404130e0737bddc8f2b84c8f0170027179d0"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.857222 4848 generic.go:334] "Generic (PLEG): container finished" podID="12b5cee1-1fa4-493c-8888-92e58d63e28e" containerID="015096506fe84fd94a5f4336b9ca7b958354133967fed931b410f66a04a59a41" exitCode=0 Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.857434 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6a7f-account-create-update-t87b7" event={"ID":"12b5cee1-1fa4-493c-8888-92e58d63e28e","Type":"ContainerDied","Data":"015096506fe84fd94a5f4336b9ca7b958354133967fed931b410f66a04a59a41"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.857467 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6a7f-account-create-update-t87b7" event={"ID":"12b5cee1-1fa4-493c-8888-92e58d63e28e","Type":"ContainerStarted","Data":"14cac20a3fac4e113cd1baedfe2cf7133d5d1fd39784c01f2a65545033bf7cc6"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.860012 4848 generic.go:334] "Generic (PLEG): container finished" podID="1c572357-66c7-4bf0-b000-4881dca67248" containerID="5eac4dd96fb786c6328553b247219cd5bfaf69e43db6e6d2d650d107c4730ccb" exitCode=0 Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.860046 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-gk6gn" event={"ID":"1c572357-66c7-4bf0-b000-4881dca67248","Type":"ContainerDied","Data":"5eac4dd96fb786c6328553b247219cd5bfaf69e43db6e6d2d650d107c4730ccb"} Jan 28 13:06:57 crc kubenswrapper[4848]: I0128 13:06:57.860069 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-gk6gn" 
event={"ID":"1c572357-66c7-4bf0-b000-4881dca67248","Type":"ContainerStarted","Data":"83c86f4a898f8e19e4a2f04adc88091954171eee0e51648dc9b5b165215e498d"} Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.094470 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-h859v"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.097302 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.103228 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-8283-account-create-update-6jh6j"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.107821 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.113406 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.148514 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h859v"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.163356 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-8283-account-create-update-6jh6j"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.185929 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-operator-scripts\") pod \"glance-db-create-h859v\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.186006 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g4g5\" (UniqueName: \"kubernetes.io/projected/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-kube-api-access-4g4g5\") pod \"glance-db-create-h859v\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.186049 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbgph\" (UniqueName: \"kubernetes.io/projected/ae3018da-2942-415c-9f0e-c82ce76ecdfd-kube-api-access-hbgph\") pod \"glance-8283-account-create-update-6jh6j\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.186134 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae3018da-2942-415c-9f0e-c82ce76ecdfd-operator-scripts\") pod \"glance-8283-account-create-update-6jh6j\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.188290 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-qhcv6"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.189710 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-qhcv6" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.192773 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.193194 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-bzpjw" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.203324 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-qhcv6"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288094 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-operator-scripts\") pod \"glance-db-create-h859v\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288610 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g4g5\" (UniqueName: \"kubernetes.io/projected/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-kube-api-access-4g4g5\") pod \"glance-db-create-h859v\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288677 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbgph\" (UniqueName: \"kubernetes.io/projected/ae3018da-2942-415c-9f0e-c82ce76ecdfd-kube-api-access-hbgph\") pod \"glance-8283-account-create-update-6jh6j\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288726 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx64c\" (UniqueName: \"kubernetes.io/projected/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-kube-api-access-sx64c\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288752 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-config-data\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288813 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-db-sync-config-data\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288883 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae3018da-2942-415c-9f0e-c82ce76ecdfd-operator-scripts\") pod \"glance-8283-account-create-update-6jh6j\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.288957 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-combined-ca-bundle\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.289173 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-operator-scripts\") pod \"glance-db-create-h859v\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.290128 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae3018da-2942-415c-9f0e-c82ce76ecdfd-operator-scripts\") pod \"glance-8283-account-create-update-6jh6j\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.301536 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-ffc8h"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.302992 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ffc8h" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.342074 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g4g5\" (UniqueName: \"kubernetes.io/projected/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-kube-api-access-4g4g5\") pod \"glance-db-create-h859v\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " pod="openstack/glance-db-create-h859v" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.365225 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-ffc8h"] Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.386843 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbgph\" (UniqueName: \"kubernetes.io/projected/ae3018da-2942-415c-9f0e-c82ce76ecdfd-kube-api-access-hbgph\") pod \"glance-8283-account-create-update-6jh6j\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.410894 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5c7x\" (UniqueName: \"kubernetes.io/projected/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-kube-api-access-x5c7x\") pod \"neutron-db-create-ffc8h\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " pod="openstack/neutron-db-create-ffc8h" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.414529 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-operator-scripts\") pod \"neutron-db-create-ffc8h\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " pod="openstack/neutron-db-create-ffc8h" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.414598 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx64c\" (UniqueName: \"kubernetes.io/projected/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-kube-api-access-sx64c\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.414632 
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.414752 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-db-sync-config-data\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.414909 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-combined-ca-bundle\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.421880 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-combined-ca-bundle\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.421971 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-a957-account-create-update-wln2w"]
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.424200 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-config-data\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.429108 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h859v"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.433772 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-db-sync-config-data\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.438930 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a957-account-create-update-wln2w"]
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.439060 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.456314 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-8283-account-create-update-6jh6j"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.461756 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.481101 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx64c\" (UniqueName: \"kubernetes.io/projected/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-kube-api-access-sx64c\") pod \"watcher-db-sync-qhcv6\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") " pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.524521 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cce9c77-ae44-4fa5-b025-d1b76d14c352-operator-scripts\") pod \"neutron-a957-account-create-update-wln2w\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.524614 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrw4s\" (UniqueName: \"kubernetes.io/projected/8cce9c77-ae44-4fa5-b025-d1b76d14c352-kube-api-access-xrw4s\") pod \"neutron-a957-account-create-update-wln2w\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.524672 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5c7x\" (UniqueName: \"kubernetes.io/projected/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-kube-api-access-x5c7x\") pod \"neutron-db-create-ffc8h\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " pod="openstack/neutron-db-create-ffc8h"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.524739 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-operator-scripts\") pod \"neutron-db-create-ffc8h\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " pod="openstack/neutron-db-create-ffc8h"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.525785 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-operator-scripts\") pod \"neutron-db-create-ffc8h\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " pod="openstack/neutron-db-create-ffc8h"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.526506 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.558340 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5c7x\" (UniqueName: \"kubernetes.io/projected/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-kube-api-access-x5c7x\") pod \"neutron-db-create-ffc8h\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " pod="openstack/neutron-db-create-ffc8h"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.570488 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gk6gn"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.585555 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f8f8-account-create-update-58wd2"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.626852 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cce9c77-ae44-4fa5-b025-d1b76d14c352-operator-scripts\") pod \"neutron-a957-account-create-update-wln2w\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.626928 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrw4s\" (UniqueName: \"kubernetes.io/projected/8cce9c77-ae44-4fa5-b025-d1b76d14c352-kube-api-access-xrw4s\") pod \"neutron-a957-account-create-update-wln2w\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.628023 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cce9c77-ae44-4fa5-b025-d1b76d14c352-operator-scripts\") pod \"neutron-a957-account-create-update-wln2w\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.635601 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ffc8h"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.658127 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrw4s\" (UniqueName: \"kubernetes.io/projected/8cce9c77-ae44-4fa5-b025-d1b76d14c352-kube-api-access-xrw4s\") pod \"neutron-a957-account-create-update-wln2w\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " pod="openstack/neutron-a957-account-create-update-wln2w"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.662695 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k59cb"
Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.685134 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6a7f-account-create-update-t87b7"
Need to start a new one" pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.729220 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f18cf42c-0012-44b5-8fc3-697ff0dc8099-operator-scripts\") pod \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.729360 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfckv\" (UniqueName: \"kubernetes.io/projected/1c572357-66c7-4bf0-b000-4881dca67248-kube-api-access-sfckv\") pod \"1c572357-66c7-4bf0-b000-4881dca67248\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.729441 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87c4cea-2693-4000-8635-1fcc694ead7c-operator-scripts\") pod \"c87c4cea-2693-4000-8635-1fcc694ead7c\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.729545 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjkvb\" (UniqueName: \"kubernetes.io/projected/c87c4cea-2693-4000-8635-1fcc694ead7c-kube-api-access-kjkvb\") pod \"c87c4cea-2693-4000-8635-1fcc694ead7c\" (UID: \"c87c4cea-2693-4000-8635-1fcc694ead7c\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.729608 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlr54\" (UniqueName: \"kubernetes.io/projected/f18cf42c-0012-44b5-8fc3-697ff0dc8099-kube-api-access-tlr54\") pod \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\" (UID: \"f18cf42c-0012-44b5-8fc3-697ff0dc8099\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.729654 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c572357-66c7-4bf0-b000-4881dca67248-operator-scripts\") pod \"1c572357-66c7-4bf0-b000-4881dca67248\" (UID: \"1c572357-66c7-4bf0-b000-4881dca67248\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.730580 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c87c4cea-2693-4000-8635-1fcc694ead7c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c87c4cea-2693-4000-8635-1fcc694ead7c" (UID: "c87c4cea-2693-4000-8635-1fcc694ead7c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.730628 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f18cf42c-0012-44b5-8fc3-697ff0dc8099-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f18cf42c-0012-44b5-8fc3-697ff0dc8099" (UID: "f18cf42c-0012-44b5-8fc3-697ff0dc8099"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.731737 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c572357-66c7-4bf0-b000-4881dca67248-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1c572357-66c7-4bf0-b000-4881dca67248" (UID: "1c572357-66c7-4bf0-b000-4881dca67248"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.735627 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f18cf42c-0012-44b5-8fc3-697ff0dc8099-kube-api-access-tlr54" (OuterVolumeSpecName: "kube-api-access-tlr54") pod "f18cf42c-0012-44b5-8fc3-697ff0dc8099" (UID: "f18cf42c-0012-44b5-8fc3-697ff0dc8099"). InnerVolumeSpecName "kube-api-access-tlr54". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.736778 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c572357-66c7-4bf0-b000-4881dca67248-kube-api-access-sfckv" (OuterVolumeSpecName: "kube-api-access-sfckv") pod "1c572357-66c7-4bf0-b000-4881dca67248" (UID: "1c572357-66c7-4bf0-b000-4881dca67248"). InnerVolumeSpecName "kube-api-access-sfckv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.737680 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c87c4cea-2693-4000-8635-1fcc694ead7c-kube-api-access-kjkvb" (OuterVolumeSpecName: "kube-api-access-kjkvb") pod "c87c4cea-2693-4000-8635-1fcc694ead7c" (UID: "c87c4cea-2693-4000-8635-1fcc694ead7c"). InnerVolumeSpecName "kube-api-access-kjkvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831303 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqrnw\" (UniqueName: \"kubernetes.io/projected/12b5cee1-1fa4-493c-8888-92e58d63e28e-kube-api-access-nqrnw\") pod \"12b5cee1-1fa4-493c-8888-92e58d63e28e\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831383 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b5cee1-1fa4-493c-8888-92e58d63e28e-operator-scripts\") pod \"12b5cee1-1fa4-493c-8888-92e58d63e28e\" (UID: \"12b5cee1-1fa4-493c-8888-92e58d63e28e\") " Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831835 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlr54\" (UniqueName: \"kubernetes.io/projected/f18cf42c-0012-44b5-8fc3-697ff0dc8099-kube-api-access-tlr54\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831849 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c572357-66c7-4bf0-b000-4881dca67248-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831858 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f18cf42c-0012-44b5-8fc3-697ff0dc8099-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831867 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfckv\" (UniqueName: \"kubernetes.io/projected/1c572357-66c7-4bf0-b000-4881dca67248-kube-api-access-sfckv\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831875 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87c4cea-2693-4000-8635-1fcc694ead7c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 
28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.831883 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjkvb\" (UniqueName: \"kubernetes.io/projected/c87c4cea-2693-4000-8635-1fcc694ead7c-kube-api-access-kjkvb\") on node \"crc\" DevicePath \"\"" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.832016 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12b5cee1-1fa4-493c-8888-92e58d63e28e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "12b5cee1-1fa4-493c-8888-92e58d63e28e" (UID: "12b5cee1-1fa4-493c-8888-92e58d63e28e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:06:59 crc kubenswrapper[4848]: I0128 13:06:59.835477 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12b5cee1-1fa4-493c-8888-92e58d63e28e-kube-api-access-nqrnw" (OuterVolumeSpecName: "kube-api-access-nqrnw") pod "12b5cee1-1fa4-493c-8888-92e58d63e28e" (UID: "12b5cee1-1fa4-493c-8888-92e58d63e28e"). InnerVolumeSpecName "kube-api-access-nqrnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.880206 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a957-account-create-update-wln2w" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.893297 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f8f8-account-create-update-58wd2" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.893300 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f8f8-account-create-update-58wd2" event={"ID":"c87c4cea-2693-4000-8635-1fcc694ead7c","Type":"ContainerDied","Data":"eab71d21db0a82d80c135987699fc1e9c5b09b28bdba8f3f6e13df113b4ad866"} Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.893356 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eab71d21db0a82d80c135987699fc1e9c5b09b28bdba8f3f6e13df113b4ad866" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.895701 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-k59cb" event={"ID":"f18cf42c-0012-44b5-8fc3-697ff0dc8099","Type":"ContainerDied","Data":"1a9e8f0599e0e6b5cdfa5f508313404130e0737bddc8f2b84c8f0170027179d0"} Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.895753 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a9e8f0599e0e6b5cdfa5f508313404130e0737bddc8f2b84c8f0170027179d0" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.895827 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k59cb" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.903453 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6a7f-account-create-update-t87b7" event={"ID":"12b5cee1-1fa4-493c-8888-92e58d63e28e","Type":"ContainerDied","Data":"14cac20a3fac4e113cd1baedfe2cf7133d5d1fd39784c01f2a65545033bf7cc6"} Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.903515 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14cac20a3fac4e113cd1baedfe2cf7133d5d1fd39784c01f2a65545033bf7cc6" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.903600 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6a7f-account-create-update-t87b7" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.908667 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-gk6gn" event={"ID":"1c572357-66c7-4bf0-b000-4881dca67248","Type":"ContainerDied","Data":"83c86f4a898f8e19e4a2f04adc88091954171eee0e51648dc9b5b165215e498d"} Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.908703 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83c86f4a898f8e19e4a2f04adc88091954171eee0e51648dc9b5b165215e498d" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.908767 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-gk6gn" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.934431 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqrnw\" (UniqueName: \"kubernetes.io/projected/12b5cee1-1fa4-493c-8888-92e58d63e28e-kube-api-access-nqrnw\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:06:59.934543 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12b5cee1-1fa4-493c-8888-92e58d63e28e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:07:00.816752 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-8283-account-create-update-6jh6j"] Jan 28 13:07:00 crc kubenswrapper[4848]: I0128 13:07:00.821370 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h859v"] Jan 28 13:07:01 crc kubenswrapper[4848]: I0128 13:07:01.139875 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-qhcv6"] Jan 28 13:07:01 crc kubenswrapper[4848]: I0128 13:07:01.154525 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a957-account-create-update-wln2w"] Jan 28 13:07:01 crc kubenswrapper[4848]: I0128 13:07:01.165364 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-ffc8h"] Jan 28 13:07:03 crc kubenswrapper[4848]: W0128 13:07:03.343710 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5e91e63_da82_4c26_a0b7_1ab2f9b45396.slice/crio-d386403d2e5979f0c9b5c1f544985123a25003573e18564b57da217a2ad9dc73 WatchSource:0}: Error finding container d386403d2e5979f0c9b5c1f544985123a25003573e18564b57da217a2ad9dc73: Status 404 returned error can't find the container with id d386403d2e5979f0c9b5c1f544985123a25003573e18564b57da217a2ad9dc73 Jan 28 13:07:03 crc kubenswrapper[4848]: W0128 13:07:03.346273 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae3018da_2942_415c_9f0e_c82ce76ecdfd.slice/crio-0405e337b14f3f8682f2ae3a02c4a01501aca07407b1d042fb95d95bcbbcd656 WatchSource:0}: Error finding container 0405e337b14f3f8682f2ae3a02c4a01501aca07407b1d042fb95d95bcbbcd656: Status 404 returned error can't find the container with id 0405e337b14f3f8682f2ae3a02c4a01501aca07407b1d042fb95d95bcbbcd656 Jan 28 13:07:03 crc kubenswrapper[4848]: W0128 13:07:03.358135 4848 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd414e9e0_a933_4e8d_b7c8_3a34a145aa9f.slice/crio-7a59bdb611b8caeaaba4e19999318ba9e9625d56359a6d639ddd7f735bc3a56a WatchSource:0}: Error finding container 7a59bdb611b8caeaaba4e19999318ba9e9625d56359a6d639ddd7f735bc3a56a: Status 404 returned error can't find the container with id 7a59bdb611b8caeaaba4e19999318ba9e9625d56359a6d639ddd7f735bc3a56a Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.964772 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h859v" event={"ID":"b5e91e63-da82-4c26-a0b7-1ab2f9b45396","Type":"ContainerStarted","Data":"0463f9dd2ae519a48957286e11f663a4bd8c8751fec3cb6ec2c6467847809818"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.965306 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h859v" event={"ID":"b5e91e63-da82-4c26-a0b7-1ab2f9b45396","Type":"ContainerStarted","Data":"d386403d2e5979f0c9b5c1f544985123a25003573e18564b57da217a2ad9dc73"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.968241 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a957-account-create-update-wln2w" event={"ID":"8cce9c77-ae44-4fa5-b025-d1b76d14c352","Type":"ContainerStarted","Data":"a5ec1b515bd1631c7a555d34f8b8848f9a1f2338e2d00eda2558e401569d2132"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.968304 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a957-account-create-update-wln2w" event={"ID":"8cce9c77-ae44-4fa5-b025-d1b76d14c352","Type":"ContainerStarted","Data":"af49f72bd77e5b18e3271889f6d15a384014935ba951b8907c3d470d1847f351"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.972585 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5pkv8" event={"ID":"dc9233f9-ae7a-46f8-bec3-97aa6db5e525","Type":"ContainerStarted","Data":"d604957561136c62c92d570ae008387644ae6fd05288d6db20f69d27d0aa5671"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.975350 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-8283-account-create-update-6jh6j" event={"ID":"ae3018da-2942-415c-9f0e-c82ce76ecdfd","Type":"ContainerStarted","Data":"2d2cb277ec4b7229eb89dfe85648203239f2bb02d4f98cb7321978d182742ac7"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.976040 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-8283-account-create-update-6jh6j" event={"ID":"ae3018da-2942-415c-9f0e-c82ce76ecdfd","Type":"ContainerStarted","Data":"0405e337b14f3f8682f2ae3a02c4a01501aca07407b1d042fb95d95bcbbcd656"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.978763 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ffc8h" event={"ID":"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f","Type":"ContainerStarted","Data":"96d971ff5f17c4ddca589ef1a8589ab52ad6d71e721a8cd230b7b7c78b96b320"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.978834 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ffc8h" event={"ID":"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f","Type":"ContainerStarted","Data":"7a59bdb611b8caeaaba4e19999318ba9e9625d56359a6d639ddd7f735bc3a56a"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.982479 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qhcv6" 
event={"ID":"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7","Type":"ContainerStarted","Data":"27a0b790a859cc6bbca440958bbdeee9552d0a71ff1615f9a7c8ebb4fd2857f1"} Jan 28 13:07:03 crc kubenswrapper[4848]: I0128 13:07:03.987217 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-h859v" podStartSLOduration=4.987194318 podStartE2EDuration="4.987194318s" podCreationTimestamp="2026-01-28 13:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:03.982102367 +0000 UTC m=+1250.894319405" watchObservedRunningTime="2026-01-28 13:07:03.987194318 +0000 UTC m=+1250.899411356" Jan 28 13:07:04 crc kubenswrapper[4848]: I0128 13:07:04.025671 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-ffc8h" podStartSLOduration=5.025646469 podStartE2EDuration="5.025646469s" podCreationTimestamp="2026-01-28 13:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:04.005105182 +0000 UTC m=+1250.917322220" watchObservedRunningTime="2026-01-28 13:07:04.025646469 +0000 UTC m=+1250.937863507" Jan 28 13:07:04 crc kubenswrapper[4848]: I0128 13:07:04.030359 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-8283-account-create-update-6jh6j" podStartSLOduration=5.030341629 podStartE2EDuration="5.030341629s" podCreationTimestamp="2026-01-28 13:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:04.021109795 +0000 UTC m=+1250.933326843" watchObservedRunningTime="2026-01-28 13:07:04.030341629 +0000 UTC m=+1250.942558667" Jan 28 13:07:04 crc kubenswrapper[4848]: I0128 13:07:04.043572 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-a957-account-create-update-wln2w" podStartSLOduration=5.043544694 podStartE2EDuration="5.043544694s" podCreationTimestamp="2026-01-28 13:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:04.03580104 +0000 UTC m=+1250.948018078" watchObservedRunningTime="2026-01-28 13:07:04.043544694 +0000 UTC m=+1250.955761732" Jan 28 13:07:04 crc kubenswrapper[4848]: I0128 13:07:04.066493 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-5pkv8" podStartSLOduration=1.9809382169999998 podStartE2EDuration="8.066460837s" podCreationTimestamp="2026-01-28 13:06:56 +0000 UTC" firstStartedPulling="2026-01-28 13:06:57.376617067 +0000 UTC m=+1244.288834105" lastFinishedPulling="2026-01-28 13:07:03.462139687 +0000 UTC m=+1250.374356725" observedRunningTime="2026-01-28 13:07:04.057396017 +0000 UTC m=+1250.969613105" watchObservedRunningTime="2026-01-28 13:07:04.066460837 +0000 UTC m=+1250.978677875" Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.001476 4848 generic.go:334] "Generic (PLEG): container finished" podID="8cce9c77-ae44-4fa5-b025-d1b76d14c352" containerID="a5ec1b515bd1631c7a555d34f8b8848f9a1f2338e2d00eda2558e401569d2132" exitCode=0 Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.001954 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a957-account-create-update-wln2w" 
event={"ID":"8cce9c77-ae44-4fa5-b025-d1b76d14c352","Type":"ContainerDied","Data":"a5ec1b515bd1631c7a555d34f8b8848f9a1f2338e2d00eda2558e401569d2132"} Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.011965 4848 generic.go:334] "Generic (PLEG): container finished" podID="ae3018da-2942-415c-9f0e-c82ce76ecdfd" containerID="2d2cb277ec4b7229eb89dfe85648203239f2bb02d4f98cb7321978d182742ac7" exitCode=0 Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.012237 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-8283-account-create-update-6jh6j" event={"ID":"ae3018da-2942-415c-9f0e-c82ce76ecdfd","Type":"ContainerDied","Data":"2d2cb277ec4b7229eb89dfe85648203239f2bb02d4f98cb7321978d182742ac7"} Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.015237 4848 generic.go:334] "Generic (PLEG): container finished" podID="d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" containerID="96d971ff5f17c4ddca589ef1a8589ab52ad6d71e721a8cd230b7b7c78b96b320" exitCode=0 Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.015440 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ffc8h" event={"ID":"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f","Type":"ContainerDied","Data":"96d971ff5f17c4ddca589ef1a8589ab52ad6d71e721a8cd230b7b7c78b96b320"} Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.019492 4848 generic.go:334] "Generic (PLEG): container finished" podID="b5e91e63-da82-4c26-a0b7-1ab2f9b45396" containerID="0463f9dd2ae519a48957286e11f663a4bd8c8751fec3cb6ec2c6467847809818" exitCode=0 Jan 28 13:07:05 crc kubenswrapper[4848]: I0128 13:07:05.022436 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h859v" event={"ID":"b5e91e63-da82-4c26-a0b7-1ab2f9b45396","Type":"ContainerDied","Data":"0463f9dd2ae519a48957286e11f663a4bd8c8751fec3cb6ec2c6467847809818"} Jan 28 13:07:07 crc kubenswrapper[4848]: I0128 13:07:07.924834 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:07:07 crc kubenswrapper[4848]: I0128 13:07:07.925834 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.090810 4848 generic.go:334] "Generic (PLEG): container finished" podID="dc9233f9-ae7a-46f8-bec3-97aa6db5e525" containerID="d604957561136c62c92d570ae008387644ae6fd05288d6db20f69d27d0aa5671" exitCode=0 Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.090899 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5pkv8" event={"ID":"dc9233f9-ae7a-46f8-bec3-97aa6db5e525","Type":"ContainerDied","Data":"d604957561136c62c92d570ae008387644ae6fd05288d6db20f69d27d0aa5671"} Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.448411 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h859v" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.480222 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-ffc8h" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.490424 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a957-account-create-update-wln2w" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.523566 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.538059 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrw4s\" (UniqueName: \"kubernetes.io/projected/8cce9c77-ae44-4fa5-b025-d1b76d14c352-kube-api-access-xrw4s\") pod \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.538231 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-operator-scripts\") pod \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.538534 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g4g5\" (UniqueName: \"kubernetes.io/projected/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-kube-api-access-4g4g5\") pod \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\" (UID: \"b5e91e63-da82-4c26-a0b7-1ab2f9b45396\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.538582 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cce9c77-ae44-4fa5-b025-d1b76d14c352-operator-scripts\") pod \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\" (UID: \"8cce9c77-ae44-4fa5-b025-d1b76d14c352\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.538662 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5c7x\" (UniqueName: \"kubernetes.io/projected/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-kube-api-access-x5c7x\") pod \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.538739 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-operator-scripts\") pod \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\" (UID: \"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.539295 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b5e91e63-da82-4c26-a0b7-1ab2f9b45396" (UID: "b5e91e63-da82-4c26-a0b7-1ab2f9b45396"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.539727 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" (UID: "d414e9e0-a933-4e8d-b7c8-3a34a145aa9f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.539877 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cce9c77-ae44-4fa5-b025-d1b76d14c352-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8cce9c77-ae44-4fa5-b025-d1b76d14c352" (UID: "8cce9c77-ae44-4fa5-b025-d1b76d14c352"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.550169 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-kube-api-access-4g4g5" (OuterVolumeSpecName: "kube-api-access-4g4g5") pod "b5e91e63-da82-4c26-a0b7-1ab2f9b45396" (UID: "b5e91e63-da82-4c26-a0b7-1ab2f9b45396"). InnerVolumeSpecName "kube-api-access-4g4g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.551336 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cce9c77-ae44-4fa5-b025-d1b76d14c352-kube-api-access-xrw4s" (OuterVolumeSpecName: "kube-api-access-xrw4s") pod "8cce9c77-ae44-4fa5-b025-d1b76d14c352" (UID: "8cce9c77-ae44-4fa5-b025-d1b76d14c352"). InnerVolumeSpecName "kube-api-access-xrw4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.551674 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-kube-api-access-x5c7x" (OuterVolumeSpecName: "kube-api-access-x5c7x") pod "d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" (UID: "d414e9e0-a933-4e8d-b7c8-3a34a145aa9f"). InnerVolumeSpecName "kube-api-access-x5c7x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.641025 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbgph\" (UniqueName: \"kubernetes.io/projected/ae3018da-2942-415c-9f0e-c82ce76ecdfd-kube-api-access-hbgph\") pod \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.641389 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae3018da-2942-415c-9f0e-c82ce76ecdfd-operator-scripts\") pod \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\" (UID: \"ae3018da-2942-415c-9f0e-c82ce76ecdfd\") " Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.641969 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g4g5\" (UniqueName: \"kubernetes.io/projected/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-kube-api-access-4g4g5\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.642001 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cce9c77-ae44-4fa5-b025-d1b76d14c352-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.642015 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5c7x\" (UniqueName: \"kubernetes.io/projected/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-kube-api-access-x5c7x\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.642028 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.642041 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrw4s\" (UniqueName: \"kubernetes.io/projected/8cce9c77-ae44-4fa5-b025-d1b76d14c352-kube-api-access-xrw4s\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.642053 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5e91e63-da82-4c26-a0b7-1ab2f9b45396-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.642332 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae3018da-2942-415c-9f0e-c82ce76ecdfd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae3018da-2942-415c-9f0e-c82ce76ecdfd" (UID: "ae3018da-2942-415c-9f0e-c82ce76ecdfd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.645502 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae3018da-2942-415c-9f0e-c82ce76ecdfd-kube-api-access-hbgph" (OuterVolumeSpecName: "kube-api-access-hbgph") pod "ae3018da-2942-415c-9f0e-c82ce76ecdfd" (UID: "ae3018da-2942-415c-9f0e-c82ce76ecdfd"). InnerVolumeSpecName "kube-api-access-hbgph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.743938 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbgph\" (UniqueName: \"kubernetes.io/projected/ae3018da-2942-415c-9f0e-c82ce76ecdfd-kube-api-access-hbgph\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:09 crc kubenswrapper[4848]: I0128 13:07:09.743983 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae3018da-2942-415c-9f0e-c82ce76ecdfd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.105066 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qhcv6" event={"ID":"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7","Type":"ContainerStarted","Data":"7e61758ab3cc03fb128970cfbe4a697455b6432ebb9504212a0242a33e107169"} Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.107465 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h859v" event={"ID":"b5e91e63-da82-4c26-a0b7-1ab2f9b45396","Type":"ContainerDied","Data":"d386403d2e5979f0c9b5c1f544985123a25003573e18564b57da217a2ad9dc73"} Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.107508 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d386403d2e5979f0c9b5c1f544985123a25003573e18564b57da217a2ad9dc73" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.107567 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h859v" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.127585 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a957-account-create-update-wln2w" event={"ID":"8cce9c77-ae44-4fa5-b025-d1b76d14c352","Type":"ContainerDied","Data":"af49f72bd77e5b18e3271889f6d15a384014935ba951b8907c3d470d1847f351"} Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.127637 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af49f72bd77e5b18e3271889f6d15a384014935ba951b8907c3d470d1847f351" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.127705 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a957-account-create-update-wln2w" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.132790 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-8283-account-create-update-6jh6j" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.132800 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-8283-account-create-update-6jh6j" event={"ID":"ae3018da-2942-415c-9f0e-c82ce76ecdfd","Type":"ContainerDied","Data":"0405e337b14f3f8682f2ae3a02c4a01501aca07407b1d042fb95d95bcbbcd656"} Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.133506 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0405e337b14f3f8682f2ae3a02c4a01501aca07407b1d042fb95d95bcbbcd656" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.138661 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-ffc8h" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.138687 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ffc8h" event={"ID":"d414e9e0-a933-4e8d-b7c8-3a34a145aa9f","Type":"ContainerDied","Data":"7a59bdb611b8caeaaba4e19999318ba9e9625d56359a6d639ddd7f735bc3a56a"} Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.138771 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a59bdb611b8caeaaba4e19999318ba9e9625d56359a6d639ddd7f735bc3a56a" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.139890 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-qhcv6" podStartSLOduration=5.223775392 podStartE2EDuration="11.139867122s" podCreationTimestamp="2026-01-28 13:06:59 +0000 UTC" firstStartedPulling="2026-01-28 13:07:03.388888544 +0000 UTC m=+1250.301105582" lastFinishedPulling="2026-01-28 13:07:09.304980274 +0000 UTC m=+1256.217197312" observedRunningTime="2026-01-28 13:07:10.126918344 +0000 UTC m=+1257.039135382" watchObservedRunningTime="2026-01-28 13:07:10.139867122 +0000 UTC m=+1257.052084190" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.595452 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.661267 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-765sd\" (UniqueName: \"kubernetes.io/projected/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-kube-api-access-765sd\") pod \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.661354 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-config-data\") pod \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.661485 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-combined-ca-bundle\") pod \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\" (UID: \"dc9233f9-ae7a-46f8-bec3-97aa6db5e525\") " Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.668042 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-kube-api-access-765sd" (OuterVolumeSpecName: "kube-api-access-765sd") pod "dc9233f9-ae7a-46f8-bec3-97aa6db5e525" (UID: "dc9233f9-ae7a-46f8-bec3-97aa6db5e525"). InnerVolumeSpecName "kube-api-access-765sd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.707024 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc9233f9-ae7a-46f8-bec3-97aa6db5e525" (UID: "dc9233f9-ae7a-46f8-bec3-97aa6db5e525"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.709865 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-config-data" (OuterVolumeSpecName: "config-data") pod "dc9233f9-ae7a-46f8-bec3-97aa6db5e525" (UID: "dc9233f9-ae7a-46f8-bec3-97aa6db5e525"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.763697 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.763767 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:10 crc kubenswrapper[4848]: I0128 13:07:10.763781 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-765sd\" (UniqueName: \"kubernetes.io/projected/dc9233f9-ae7a-46f8-bec3-97aa6db5e525-kube-api-access-765sd\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.163811 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-5pkv8" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.164917 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5pkv8" event={"ID":"dc9233f9-ae7a-46f8-bec3-97aa6db5e525","Type":"ContainerDied","Data":"a366db868b563900e0be7c2108bcc13fc30a9b7999461bd197b6fbe27b0766c9"} Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.164958 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a366db868b563900e0be7c2108bcc13fc30a9b7999461bd197b6fbe27b0766c9" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433219 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c786f88cc-fbqrr"] Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433689 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c572357-66c7-4bf0-b000-4881dca67248" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433714 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c572357-66c7-4bf0-b000-4881dca67248" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433735 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18cf42c-0012-44b5-8fc3-697ff0dc8099" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433745 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f18cf42c-0012-44b5-8fc3-697ff0dc8099" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433756 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc9233f9-ae7a-46f8-bec3-97aa6db5e525" containerName="keystone-db-sync" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433764 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc9233f9-ae7a-46f8-bec3-97aa6db5e525" containerName="keystone-db-sync" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433780 4848 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8cce9c77-ae44-4fa5-b025-d1b76d14c352" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433788 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cce9c77-ae44-4fa5-b025-d1b76d14c352" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433809 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5e91e63-da82-4c26-a0b7-1ab2f9b45396" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433818 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5e91e63-da82-4c26-a0b7-1ab2f9b45396" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433834 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c87c4cea-2693-4000-8635-1fcc694ead7c" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433843 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c87c4cea-2693-4000-8635-1fcc694ead7c" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433867 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433875 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433892 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae3018da-2942-415c-9f0e-c82ce76ecdfd" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433898 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae3018da-2942-415c-9f0e-c82ce76ecdfd" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: E0128 13:07:11.433911 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12b5cee1-1fa4-493c-8888-92e58d63e28e" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.433921 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b5cee1-1fa4-493c-8888-92e58d63e28e" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434101 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cce9c77-ae44-4fa5-b025-d1b76d14c352" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434132 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="12b5cee1-1fa4-493c-8888-92e58d63e28e" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434152 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c87c4cea-2693-4000-8635-1fcc694ead7c" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434161 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f18cf42c-0012-44b5-8fc3-697ff0dc8099" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434169 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5e91e63-da82-4c26-a0b7-1ab2f9b45396" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434182 4848 
memory_manager.go:354] "RemoveStaleState removing state" podUID="ae3018da-2942-415c-9f0e-c82ce76ecdfd" containerName="mariadb-account-create-update" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434194 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c572357-66c7-4bf0-b000-4881dca67248" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434205 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc9233f9-ae7a-46f8-bec3-97aa6db5e525" containerName="keystone-db-sync" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.434214 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" containerName="mariadb-database-create" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.435230 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.451385 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gszqt"] Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.453098 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.458838 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.459058 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.470054 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.471001 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ffggt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.493294 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c786f88cc-fbqrr"] Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.527154 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.577851 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gszqt"] Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593215 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrk5d\" (UniqueName: \"kubernetes.io/projected/0277befd-8625-4350-8ed7-c7a756caa729-kube-api-access-lrk5d\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593293 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-sb\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593315 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-scripts\") pod \"keystone-bootstrap-gszqt\" (UID: 
\"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593337 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-fernet-keys\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593362 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-credential-keys\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593385 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-config\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593405 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-swift-storage-0\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593425 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-config-data\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593500 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-svc\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593520 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-nb\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593540 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-combined-ca-bundle\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.593579 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbltl\" (UniqueName: 
\"kubernetes.io/projected/b6ded25e-7c44-40d6-bf27-f23d595c4776-kube-api-access-gbltl\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696337 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbltl\" (UniqueName: \"kubernetes.io/projected/b6ded25e-7c44-40d6-bf27-f23d595c4776-kube-api-access-gbltl\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696408 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrk5d\" (UniqueName: \"kubernetes.io/projected/0277befd-8625-4350-8ed7-c7a756caa729-kube-api-access-lrk5d\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696427 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-sb\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696447 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-scripts\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696466 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-fernet-keys\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-credential-keys\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696530 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-config\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696549 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-swift-storage-0\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696569 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-config-data\") pod 
\"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696629 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-svc\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696649 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-nb\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.696667 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-combined-ca-bundle\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.730834 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-svc\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.733283 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-config-data\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.739671 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-config\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.739952 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-scripts\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.740215 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-nb\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.740548 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-swift-storage-0\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.740560 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-sb\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.740991 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-combined-ca-bundle\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.743204 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-fernet-keys\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.743678 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-credential-keys\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.759429 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68c6884989-bcdsh"] Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.761301 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.779682 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-x5qcb" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.779995 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.780155 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.780334 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.801131 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbltl\" (UniqueName: \"kubernetes.io/projected/b6ded25e-7c44-40d6-bf27-f23d595c4776-kube-api-access-gbltl\") pod \"keystone-bootstrap-gszqt\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.845384 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68c6884989-bcdsh"] Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.847333 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrk5d\" (UniqueName: \"kubernetes.io/projected/0277befd-8625-4350-8ed7-c7a756caa729-kube-api-access-lrk5d\") pod \"dnsmasq-dns-c786f88cc-fbqrr\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.881357 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-pdmlr"] Jan 28 13:07:11 crc kubenswrapper[4848]: 
I0128 13:07:11.882822 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.908696 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-scripts\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.908786 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-config-data\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.908875 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhnb2\" (UniqueName: \"kubernetes.io/projected/84bfeb56-2dcf-420f-9939-b9486ac092ea-kube-api-access-vhnb2\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.908942 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/84bfeb56-2dcf-420f-9939-b9486ac092ea-horizon-secret-key\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.908973 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bfeb56-2dcf-420f-9939-b9486ac092ea-logs\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.921619 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-67mpv" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.922067 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.922299 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 28 13:07:11 crc kubenswrapper[4848]: I0128 13:07:11.946669 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-pdmlr"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016139 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/84bfeb56-2dcf-420f-9939-b9486ac092ea-horizon-secret-key\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016212 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bfeb56-2dcf-420f-9939-b9486ac092ea-logs\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: 
I0128 13:07:12.016266 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-scripts\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016370 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnb4r\" (UniqueName: \"kubernetes.io/projected/ceb3076d-8232-44f5-8184-d727ef5c2943-kube-api-access-tnb4r\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016427 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ceb3076d-8232-44f5-8184-d727ef5c2943-etc-machine-id\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016490 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-db-sync-config-data\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016516 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-scripts\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016582 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-combined-ca-bundle\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016615 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-config-data\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016729 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhnb2\" (UniqueName: \"kubernetes.io/projected/84bfeb56-2dcf-420f-9939-b9486ac092ea-kube-api-access-vhnb2\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.016774 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-config-data\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.018028 4848 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/placement-db-sync-x6pft"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.018358 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bfeb56-2dcf-420f-9939-b9486ac092ea-logs\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.019373 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-scripts\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.019802 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-config-data\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.027168 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.031733 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.031878 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.031949 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/84bfeb56-2dcf-420f-9939-b9486ac092ea-horizon-secret-key\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.057891 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gdl5g" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.059945 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.100238 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.108171 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhnb2\" (UniqueName: \"kubernetes.io/projected/84bfeb56-2dcf-420f-9939-b9486ac092ea-kube-api-access-vhnb2\") pod \"horizon-68c6884989-bcdsh\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.119874 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-combined-ca-bundle\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.119939 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnb4r\" (UniqueName: \"kubernetes.io/projected/ceb3076d-8232-44f5-8184-d727ef5c2943-kube-api-access-tnb4r\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.119970 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ceb3076d-8232-44f5-8184-d727ef5c2943-etc-machine-id\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.119994 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88dde3f6-891e-49d7-a24c-575d166ec790-logs\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120027 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-db-sync-config-data\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120059 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-combined-ca-bundle\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120089 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr2g8\" (UniqueName: \"kubernetes.io/projected/88dde3f6-891e-49d7-a24c-575d166ec790-kube-api-access-nr2g8\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120110 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-config-data\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" 
Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120167 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-scripts\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120193 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-config-data\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.120221 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-scripts\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.121579 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-x6pft"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.126212 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ceb3076d-8232-44f5-8184-d727ef5c2943-etc-machine-id\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.141599 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-skhpx"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.143278 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.147233 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-db-sync-config-data\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.150972 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-scripts\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.167059 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.167313 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2n7gr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.174230 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-combined-ca-bundle\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.181402 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-config-data\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.196089 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-skhpx"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.202739 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.231891 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqcss\" (UniqueName: \"kubernetes.io/projected/a7100632-3157-40c8-9f9f-a47fcd756ca5-kube-api-access-mqcss\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232018 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-combined-ca-bundle\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232070 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr2g8\" (UniqueName: \"kubernetes.io/projected/88dde3f6-891e-49d7-a24c-575d166ec790-kube-api-access-nr2g8\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232106 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-config-data\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232147 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-db-sync-config-data\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232266 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-scripts\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232370 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-combined-ca-bundle\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.232467 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88dde3f6-891e-49d7-a24c-575d166ec790-logs\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.233143 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88dde3f6-891e-49d7-a24c-575d166ec790-logs\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 
13:07:12.243464 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-scripts\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.256507 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-869d8bc777-7mgh8"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.262890 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.276820 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnb4r\" (UniqueName: \"kubernetes.io/projected/ceb3076d-8232-44f5-8184-d727ef5c2943-kube-api-access-tnb4r\") pod \"cinder-db-sync-pdmlr\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") " pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.279274 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-config-data\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.281528 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-combined-ca-bundle\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.304908 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr2g8\" (UniqueName: \"kubernetes.io/projected/88dde3f6-891e-49d7-a24c-575d166ec790-kube-api-access-nr2g8\") pod \"placement-db-sync-x6pft\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") " pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336502 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-config-data\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336568 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqcss\" (UniqueName: \"kubernetes.io/projected/a7100632-3157-40c8-9f9f-a47fcd756ca5-kube-api-access-mqcss\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336592 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/be500c59-d797-4d7b-9b99-84a72912c669-horizon-secret-key\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336630 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-combined-ca-bundle\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336667 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-scripts\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336695 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnnv7\" (UniqueName: \"kubernetes.io/projected/be500c59-d797-4d7b-9b99-84a72912c669-kube-api-access-cnnv7\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336715 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-db-sync-config-data\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.336731 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be500c59-d797-4d7b-9b99-84a72912c669-logs\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.342729 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-combined-ca-bundle\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.352557 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-db-sync-config-data\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.394186 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqcss\" (UniqueName: \"kubernetes.io/projected/a7100632-3157-40c8-9f9f-a47fcd756ca5-kube-api-access-mqcss\") pod \"barbican-db-sync-skhpx\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") " pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.401979 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-869d8bc777-7mgh8"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.426673 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-x6pft" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.441414 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c786f88cc-fbqrr"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.442031 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-scripts\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.442116 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnnv7\" (UniqueName: \"kubernetes.io/projected/be500c59-d797-4d7b-9b99-84a72912c669-kube-api-access-cnnv7\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.442146 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be500c59-d797-4d7b-9b99-84a72912c669-logs\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.442449 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-config-data\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.442520 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/be500c59-d797-4d7b-9b99-84a72912c669-horizon-secret-key\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.457952 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.460625 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.463962 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be500c59-d797-4d7b-9b99-84a72912c669-logs\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.464740 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.465777 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-config-data\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.480918 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-scripts\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.483634 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.494802 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.502219 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-skhpx" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.504579 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/be500c59-d797-4d7b-9b99-84a72912c669-horizon-secret-key\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.511280 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnnv7\" (UniqueName: \"kubernetes.io/projected/be500c59-d797-4d7b-9b99-84a72912c669-kube-api-access-cnnv7\") pod \"horizon-869d8bc777-7mgh8\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.546945 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-pdmlr" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.560837 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-run-httpd\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.561026 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.561088 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.561184 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-config-data\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.561200 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plhf6\" (UniqueName: \"kubernetes.io/projected/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-kube-api-access-plhf6\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.561553 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-scripts\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.561664 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-log-httpd\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.593311 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77b6dfd897-mp852"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.595278 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.606945 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.617143 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77b6dfd897-mp852"] Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666518 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-log-httpd\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666615 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcf6f\" (UniqueName: \"kubernetes.io/projected/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-kube-api-access-kcf6f\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666655 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-run-httpd\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666680 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-sb\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666748 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666774 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666815 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-config-data\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666834 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plhf6\" (UniqueName: \"kubernetes.io/projected/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-kube-api-access-plhf6\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666866 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-nb\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: 
\"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666896 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-config\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666919 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-svc\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666948 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-swift-storage-0\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.666968 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-scripts\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.674048 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-run-httpd\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.674447 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-log-httpd\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.685093 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-config-data\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.689149 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.690697 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.693463 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-scripts\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.736417 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plhf6\" (UniqueName: \"kubernetes.io/projected/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-kube-api-access-plhf6\") pod \"ceilometer-0\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.769350 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcf6f\" (UniqueName: \"kubernetes.io/projected/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-kube-api-access-kcf6f\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.769429 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-sb\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.769534 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-nb\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.769561 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-config\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.769586 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-svc\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.769620 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-swift-storage-0\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.772165 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-sb\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.777520 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-swift-storage-0\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: 
\"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.777809 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-svc\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.778291 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-config\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.799969 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcf6f\" (UniqueName: \"kubernetes.io/projected/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-kube-api-access-kcf6f\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.825357 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-nb\") pod \"dnsmasq-dns-77b6dfd897-mp852\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.834803 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:07:12 crc kubenswrapper[4848]: I0128 13:07:12.979977 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.092409 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gszqt"] Jan 28 13:07:13 crc kubenswrapper[4848]: W0128 13:07:13.168124 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6ded25e_7c44_40d6_bf27_f23d595c4776.slice/crio-f026ef270d05e033e766b0bb71fd247e37db0a6696b6c8cba7bd914864b629c1 WatchSource:0}: Error finding container f026ef270d05e033e766b0bb71fd247e37db0a6696b6c8cba7bd914864b629c1: Status 404 returned error can't find the container with id f026ef270d05e033e766b0bb71fd247e37db0a6696b6c8cba7bd914864b629c1 Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.227489 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gszqt" event={"ID":"b6ded25e-7c44-40d6-bf27-f23d595c4776","Type":"ContainerStarted","Data":"f026ef270d05e033e766b0bb71fd247e37db0a6696b6c8cba7bd914864b629c1"} Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.499295 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68c6884989-bcdsh"] Jan 28 13:07:13 crc kubenswrapper[4848]: W0128 13:07:13.502697 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84bfeb56_2dcf_420f_9939_b9486ac092ea.slice/crio-92c15b095b852388e90083e6547b5862d7cd79c73468b71a417b3d93606c56fb WatchSource:0}: Error finding container 92c15b095b852388e90083e6547b5862d7cd79c73468b71a417b3d93606c56fb: Status 404 returned error can't find the container with id 92c15b095b852388e90083e6547b5862d7cd79c73468b71a417b3d93606c56fb Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.517300 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c786f88cc-fbqrr"] Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.533467 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-x6pft"] Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.721299 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-pdmlr"] Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.736531 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-skhpx"] Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.748403 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-869d8bc777-7mgh8"] Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.901087 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:07:13 crc kubenswrapper[4848]: I0128 13:07:13.925958 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77b6dfd897-mp852"] Jan 28 13:07:13 crc kubenswrapper[4848]: W0128 13:07:13.946100 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0ac205d_682b_4cb7_b5eb_05a9b1f710ff.slice/crio-2883f3b9a69a8ab9b1ac531683d5ed593037110a08bb6b001cf8859154bc7eb8 WatchSource:0}: Error finding container 2883f3b9a69a8ab9b1ac531683d5ed593037110a08bb6b001cf8859154bc7eb8: Status 404 returned error can't find the container with id 2883f3b9a69a8ab9b1ac531683d5ed593037110a08bb6b001cf8859154bc7eb8 Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.140737 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/horizon-68c6884989-bcdsh"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.196315 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.220865 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68dd8969dc-2vfc6"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.223860 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.244192 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68dd8969dc-2vfc6"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.280738 4848 generic.go:334] "Generic (PLEG): container finished" podID="0277befd-8625-4350-8ed7-c7a756caa729" containerID="4e28bb10160eb648110fd14c0f219dc2cad5934cd64dc4aa70d7f766fcf5cd38" exitCode=0 Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.280808 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" event={"ID":"0277befd-8625-4350-8ed7-c7a756caa729","Type":"ContainerDied","Data":"4e28bb10160eb648110fd14c0f219dc2cad5934cd64dc4aa70d7f766fcf5cd38"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.280837 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" event={"ID":"0277befd-8625-4350-8ed7-c7a756caa729","Type":"ContainerStarted","Data":"f9769e5992afeacdc072e7275c71ab0d995290733e7fe0c64ed3cfa16148b78b"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.325978 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pdmlr" event={"ID":"ceb3076d-8232-44f5-8184-d727ef5c2943","Type":"ContainerStarted","Data":"b0f2dde9d8b3002d523458aed978f09d742b7c8768e14a72a393b6cc71ee361f"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.327667 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-config-data\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.327808 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-scripts\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.327922 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b8c10aeb-b312-4c6b-aac0-5e5745785034-horizon-secret-key\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.328030 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8c10aeb-b312-4c6b-aac0-5e5745785034-logs\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.328109 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf28t\" (UniqueName: \"kubernetes.io/projected/b8c10aeb-b312-4c6b-aac0-5e5745785034-kube-api-access-lf28t\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.358964 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-nhdf9"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.360740 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.363779 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c898f" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.367152 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.375875 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-skhpx" event={"ID":"a7100632-3157-40c8-9f9f-a47fcd756ca5","Type":"ContainerStarted","Data":"666146865fb1d2b7fc44d283b229e4437822129277ecf795e0b4bc145d96fb31"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.385596 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c6884989-bcdsh" event={"ID":"84bfeb56-2dcf-420f-9939-b9486ac092ea","Type":"ContainerStarted","Data":"92c15b095b852388e90083e6547b5862d7cd79c73468b71a417b3d93606c56fb"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.394022 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869d8bc777-7mgh8" event={"ID":"be500c59-d797-4d7b-9b99-84a72912c669","Type":"ContainerStarted","Data":"dce7037e617d6e284f1683ce19f448d5fd7aa941308d77b6f504fcad94ef8466"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.396494 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-nhdf9"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.398771 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" event={"ID":"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0","Type":"ContainerStarted","Data":"e2f170259864e0b7648093617a430df8a0fa2d243f660c11ee923062338dd913"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.412221 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gszqt" event={"ID":"b6ded25e-7c44-40d6-bf27-f23d595c4776","Type":"ContainerStarted","Data":"e1831ca876ba45efafdbcc3a2920a1ecea5c5b0cf626b909f3798d32419fcaba"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.423619 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerStarted","Data":"2883f3b9a69a8ab9b1ac531683d5ed593037110a08bb6b001cf8859154bc7eb8"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.426564 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-x6pft" event={"ID":"88dde3f6-891e-49d7-a24c-575d166ec790","Type":"ContainerStarted","Data":"cd3ed2c15b556659af4fd9733f07f45d5053f098be2c426424c0cf00471ea261"} Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431542 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8c10aeb-b312-4c6b-aac0-5e5745785034-logs\") pod 
\"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431624 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf28t\" (UniqueName: \"kubernetes.io/projected/b8c10aeb-b312-4c6b-aac0-5e5745785034-kube-api-access-lf28t\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431658 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-config-data\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431687 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-combined-ca-bundle\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431728 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgc8n\" (UniqueName: \"kubernetes.io/projected/c9967e9e-d256-4645-be9b-3f3789db9f05-kube-api-access-kgc8n\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431763 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-config-data\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431788 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-scripts\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431816 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-db-sync-config-data\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.431861 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b8c10aeb-b312-4c6b-aac0-5e5745785034-horizon-secret-key\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.432990 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8c10aeb-b312-4c6b-aac0-5e5745785034-logs\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " 
pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.434142 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-scripts\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.435000 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-config-data\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.440103 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b8c10aeb-b312-4c6b-aac0-5e5745785034-horizon-secret-key\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.442567 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gszqt" podStartSLOduration=3.442540523 podStartE2EDuration="3.442540523s" podCreationTimestamp="2026-01-28 13:07:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:14.435760145 +0000 UTC m=+1261.347977193" watchObservedRunningTime="2026-01-28 13:07:14.442540523 +0000 UTC m=+1261.354757561" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.452025 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf28t\" (UniqueName: \"kubernetes.io/projected/b8c10aeb-b312-4c6b-aac0-5e5745785034-kube-api-access-lf28t\") pod \"horizon-68dd8969dc-2vfc6\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.535601 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-config-data\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.535685 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-combined-ca-bundle\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.535754 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgc8n\" (UniqueName: \"kubernetes.io/projected/c9967e9e-d256-4645-be9b-3f3789db9f05-kube-api-access-kgc8n\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.535810 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-db-sync-config-data\") pod \"glance-db-sync-nhdf9\" (UID: 
\"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.543915 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-db-sync-config-data\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.544035 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-config-data\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.544394 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-combined-ca-bundle\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.560424 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgc8n\" (UniqueName: \"kubernetes.io/projected/c9967e9e-d256-4645-be9b-3f3789db9f05-kube-api-access-kgc8n\") pod \"glance-db-sync-nhdf9\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") " pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.623473 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.766726 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c898f" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.772804 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nhdf9" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.809544 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-tfd9p"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.813473 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.819062 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.819268 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-29l4b" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.819371 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.826232 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tfd9p"] Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.993968 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwwmf\" (UniqueName: \"kubernetes.io/projected/4f39c6ec-6d59-43de-baef-a3d680b5163f-kube-api-access-dwwmf\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.994079 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-config\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:14 crc kubenswrapper[4848]: I0128 13:07:14.994181 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-combined-ca-bundle\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.014334 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.095582 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-swift-storage-0\") pod \"0277befd-8625-4350-8ed7-c7a756caa729\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096089 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrk5d\" (UniqueName: \"kubernetes.io/projected/0277befd-8625-4350-8ed7-c7a756caa729-kube-api-access-lrk5d\") pod \"0277befd-8625-4350-8ed7-c7a756caa729\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096169 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-svc\") pod \"0277befd-8625-4350-8ed7-c7a756caa729\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096199 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-sb\") pod \"0277befd-8625-4350-8ed7-c7a756caa729\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096416 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-nb\") pod \"0277befd-8625-4350-8ed7-c7a756caa729\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096456 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-config\") pod \"0277befd-8625-4350-8ed7-c7a756caa729\" (UID: \"0277befd-8625-4350-8ed7-c7a756caa729\") " Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096824 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwwmf\" (UniqueName: \"kubernetes.io/projected/4f39c6ec-6d59-43de-baef-a3d680b5163f-kube-api-access-dwwmf\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096876 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-config\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.096930 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-combined-ca-bundle\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.118113 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0277befd-8625-4350-8ed7-c7a756caa729-kube-api-access-lrk5d" 
(OuterVolumeSpecName: "kube-api-access-lrk5d") pod "0277befd-8625-4350-8ed7-c7a756caa729" (UID: "0277befd-8625-4350-8ed7-c7a756caa729"). InnerVolumeSpecName "kube-api-access-lrk5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.149157 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwwmf\" (UniqueName: \"kubernetes.io/projected/4f39c6ec-6d59-43de-baef-a3d680b5163f-kube-api-access-dwwmf\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.149587 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-combined-ca-bundle\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.150823 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-config" (OuterVolumeSpecName: "config") pod "0277befd-8625-4350-8ed7-c7a756caa729" (UID: "0277befd-8625-4350-8ed7-c7a756caa729"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.162786 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-config\") pod \"neutron-db-sync-tfd9p\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.178238 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0277befd-8625-4350-8ed7-c7a756caa729" (UID: "0277befd-8625-4350-8ed7-c7a756caa729"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.185281 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0277befd-8625-4350-8ed7-c7a756caa729" (UID: "0277befd-8625-4350-8ed7-c7a756caa729"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.191039 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0277befd-8625-4350-8ed7-c7a756caa729" (UID: "0277befd-8625-4350-8ed7-c7a756caa729"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.199142 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.199170 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.199182 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrk5d\" (UniqueName: \"kubernetes.io/projected/0277befd-8625-4350-8ed7-c7a756caa729-kube-api-access-lrk5d\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.199190 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.199203 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.224389 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0277befd-8625-4350-8ed7-c7a756caa729" (UID: "0277befd-8625-4350-8ed7-c7a756caa729"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.301838 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0277befd-8625-4350-8ed7-c7a756caa729-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.324104 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.449565 4848 generic.go:334] "Generic (PLEG): container finished" podID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerID="03bb61c16e9e91b9fbffc4057dbf199348c1708c63a18e541cfc1a85f8cc6479" exitCode=0 Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.449921 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" event={"ID":"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0","Type":"ContainerDied","Data":"03bb61c16e9e91b9fbffc4057dbf199348c1708c63a18e541cfc1a85f8cc6479"} Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.456650 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68dd8969dc-2vfc6"] Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.461982 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" event={"ID":"0277befd-8625-4350-8ed7-c7a756caa729","Type":"ContainerDied","Data":"f9769e5992afeacdc072e7275c71ab0d995290733e7fe0c64ed3cfa16148b78b"} Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.462092 4848 scope.go:117] "RemoveContainer" containerID="4e28bb10160eb648110fd14c0f219dc2cad5934cd64dc4aa70d7f766fcf5cd38" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.462023 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c786f88cc-fbqrr" Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.630056 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c786f88cc-fbqrr"] Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.643849 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c786f88cc-fbqrr"] Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.741722 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-nhdf9"] Jan 28 13:07:15 crc kubenswrapper[4848]: W0128 13:07:15.852820 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9967e9e_d256_4645_be9b_3f3789db9f05.slice/crio-4ff566c73a7bba7e8410f1df88750fa087ed235b6c93783b086d5a4ac44a550c WatchSource:0}: Error finding container 4ff566c73a7bba7e8410f1df88750fa087ed235b6c93783b086d5a4ac44a550c: Status 404 returned error can't find the container with id 4ff566c73a7bba7e8410f1df88750fa087ed235b6c93783b086d5a4ac44a550c Jan 28 13:07:15 crc kubenswrapper[4848]: I0128 13:07:15.981209 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tfd9p"] Jan 28 13:07:16 crc kubenswrapper[4848]: W0128 13:07:16.003238 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f39c6ec_6d59_43de_baef_a3d680b5163f.slice/crio-a43baa7b2cff21ee3f8e893ab0d4c14db2af8fc6e37736a0f38f0819e0fcf787 WatchSource:0}: Error finding container a43baa7b2cff21ee3f8e893ab0d4c14db2af8fc6e37736a0f38f0819e0fcf787: Status 404 returned error can't find the container with id a43baa7b2cff21ee3f8e893ab0d4c14db2af8fc6e37736a0f38f0819e0fcf787 Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.482935 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nhdf9" event={"ID":"c9967e9e-d256-4645-be9b-3f3789db9f05","Type":"ContainerStarted","Data":"4ff566c73a7bba7e8410f1df88750fa087ed235b6c93783b086d5a4ac44a550c"} Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 
13:07:16.487571 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tfd9p" event={"ID":"4f39c6ec-6d59-43de-baef-a3d680b5163f","Type":"ContainerStarted","Data":"e157395a9e088032fe378f93954f217a8dcebf89352c0ad26608856fb015c7c3"} Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.487677 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tfd9p" event={"ID":"4f39c6ec-6d59-43de-baef-a3d680b5163f","Type":"ContainerStarted","Data":"a43baa7b2cff21ee3f8e893ab0d4c14db2af8fc6e37736a0f38f0819e0fcf787"} Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.493018 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68dd8969dc-2vfc6" event={"ID":"b8c10aeb-b312-4c6b-aac0-5e5745785034","Type":"ContainerStarted","Data":"6c774f16055c3bdd1af09687806cd637880bfce8bd043d9943ddc7177c6428db"} Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.497015 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" event={"ID":"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0","Type":"ContainerStarted","Data":"1092527bac3025c100fadaa6eeba10cdf333a204f1d232dfc529425a221b958d"} Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.498203 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.511990 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-tfd9p" podStartSLOduration=2.511962565 podStartE2EDuration="2.511962565s" podCreationTimestamp="2026-01-28 13:07:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:16.50780945 +0000 UTC m=+1263.420026488" watchObservedRunningTime="2026-01-28 13:07:16.511962565 +0000 UTC m=+1263.424179603" Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.514797 4848 generic.go:334] "Generic (PLEG): container finished" podID="08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" containerID="7e61758ab3cc03fb128970cfbe4a697455b6432ebb9504212a0242a33e107169" exitCode=0 Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.514877 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qhcv6" event={"ID":"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7","Type":"ContainerDied","Data":"7e61758ab3cc03fb128970cfbe4a697455b6432ebb9504212a0242a33e107169"} Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.562286 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" podStartSLOduration=4.562240573 podStartE2EDuration="4.562240573s" podCreationTimestamp="2026-01-28 13:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:16.541525262 +0000 UTC m=+1263.453742300" watchObservedRunningTime="2026-01-28 13:07:16.562240573 +0000 UTC m=+1263.474457601" Jan 28 13:07:16 crc kubenswrapper[4848]: I0128 13:07:16.876958 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0277befd-8625-4350-8ed7-c7a756caa729" path="/var/lib/kubelet/pods/0277befd-8625-4350-8ed7-c7a756caa729/volumes" Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.464716 4848 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.464716 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.573238 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qhcv6"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.573579 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qhcv6" event={"ID":"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7","Type":"ContainerDied","Data":"27a0b790a859cc6bbca440958bbdeee9552d0a71ff1615f9a7c8ebb4fd2857f1"}
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.573626 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27a0b790a859cc6bbca440958bbdeee9552d0a71ff1615f9a7c8ebb4fd2857f1"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.653901 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-combined-ca-bundle\") pod \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") "
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.653982 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sx64c\" (UniqueName: \"kubernetes.io/projected/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-kube-api-access-sx64c\") pod \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") "
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.654117 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-config-data\") pod \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") "
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.654173 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-db-sync-config-data\") pod \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\" (UID: \"08c6f464-d9ea-4ced-bfd0-498fcca6e0c7\") "
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.670873 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" (UID: "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.673306 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-kube-api-access-sx64c" (OuterVolumeSpecName: "kube-api-access-sx64c") pod "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" (UID: "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7"). InnerVolumeSpecName "kube-api-access-sx64c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.684327 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" (UID: "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.715170 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-config-data" (OuterVolumeSpecName: "config-data") pod "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" (UID: "08c6f464-d9ea-4ced-bfd0-498fcca6e0c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.758371 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sx64c\" (UniqueName: \"kubernetes.io/projected/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-kube-api-access-sx64c\") on node \"crc\" DevicePath \"\""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.758414 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.758427 4848 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.758439 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.889160 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:07:18 crc kubenswrapper[4848]: E0128 13:07:18.889525 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" containerName="watcher-db-sync"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.889547 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" containerName="watcher-db-sync"
Jan 28 13:07:18 crc kubenswrapper[4848]: E0128 13:07:18.889564 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0277befd-8625-4350-8ed7-c7a756caa729" containerName="init"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.889570 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0277befd-8625-4350-8ed7-c7a756caa729" containerName="init"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.889774 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" containerName="watcher-db-sync"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.889809 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0277befd-8625-4350-8ed7-c7a756caa729" containerName="init"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.890855 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.894943 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.897775 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-bzpjw"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.897996 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.979053 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.982324 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:18 crc kubenswrapper[4848]: I0128 13:07:18.985665 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.009401 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.070041 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"]
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.071814 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4d84124-baee-447b-99c9-713b3d13b205-logs\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.072492 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.072541 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-config-data\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.072573 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7nwr\" (UniqueName: \"kubernetes.io/projected/a4d84124-baee-447b-99c9-713b3d13b205-kube-api-access-m7nwr\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.072622 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.074146 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.098400 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.119941 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"]
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175648 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7811364-7959-428c-8be5-751c4b25f597-config-data\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175762 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4d84124-baee-447b-99c9-713b3d13b205-logs\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175855 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-config-data\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175880 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ljdh\" (UniqueName: \"kubernetes.io/projected/b7811364-7959-428c-8be5-751c4b25f597-kube-api-access-5ljdh\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175908 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7811364-7959-428c-8be5-751c4b25f597-logs\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175964 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.175997 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-config-data\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176022 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7nwr\" (UniqueName: \"kubernetes.io/projected/a4d84124-baee-447b-99c9-713b3d13b205-kube-api-access-m7nwr\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176071 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176095 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5cjn\" (UniqueName: \"kubernetes.io/projected/88151fad-4442-4d32-a675-f89f070ed086-kube-api-access-h5cjn\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176122 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176235 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88151fad-4442-4d32-a675-f89f070ed086-logs\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176435 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176484 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7811364-7959-428c-8be5-751c4b25f597-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.176860 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4d84124-baee-447b-99c9-713b3d13b205-logs\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.193413 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.193453 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-config-data\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.197944 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.200909 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7nwr\" (UniqueName: \"kubernetes.io/projected/a4d84124-baee-447b-99c9-713b3d13b205-kube-api-access-m7nwr\") pod \"watcher-api-0\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") " pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.245283 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.280478 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.280564 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5cjn\" (UniqueName: \"kubernetes.io/projected/88151fad-4442-4d32-a675-f89f070ed086-kube-api-access-h5cjn\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.280782 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88151fad-4442-4d32-a675-f89f070ed086-logs\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.281063 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.281099 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7811364-7959-428c-8be5-751c4b25f597-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.281163 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7811364-7959-428c-8be5-751c4b25f597-config-data\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.281385 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88151fad-4442-4d32-a675-f89f070ed086-logs\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.281822 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-config-data\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.282007 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ljdh\" (UniqueName: \"kubernetes.io/projected/b7811364-7959-428c-8be5-751c4b25f597-kube-api-access-5ljdh\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.282064 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7811364-7959-428c-8be5-751c4b25f597-logs\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.282990 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7811364-7959-428c-8be5-751c4b25f597-logs\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.286222 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.286639 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.287077 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-config-data\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.287829 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7811364-7959-428c-8be5-751c4b25f597-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.288187 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7811364-7959-428c-8be5-751c4b25f597-config-data\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.304222 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ljdh\" (UniqueName: \"kubernetes.io/projected/b7811364-7959-428c-8be5-751c4b25f597-kube-api-access-5ljdh\") pod \"watcher-applier-0\" (UID: \"b7811364-7959-428c-8be5-751c4b25f597\") " pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.314203 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5cjn\" (UniqueName: \"kubernetes.io/projected/88151fad-4442-4d32-a675-f89f070ed086-kube-api-access-h5cjn\") pod \"watcher-decision-engine-0\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.314867 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.426812 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0"
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.844473 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:07:19 crc kubenswrapper[4848]: I0128 13:07:19.946723 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 28 13:07:20 crc kubenswrapper[4848]: I0128 13:07:20.030894 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"]
Jan 28 13:07:20 crc kubenswrapper[4848]: W0128 13:07:20.606983 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88151fad_4442_4d32_a675_f89f070ed086.slice/crio-b6c3c8f29aa01731f355045457e1becd67b5e1ea57ddf758ea5483e25f3c40a5 WatchSource:0}: Error finding container b6c3c8f29aa01731f355045457e1becd67b5e1ea57ddf758ea5483e25f3c40a5: Status 404 returned error can't find the container with id b6c3c8f29aa01731f355045457e1becd67b5e1ea57ddf758ea5483e25f3c40a5
Jan 28 13:07:20 crc kubenswrapper[4848]: I0128 13:07:20.607877 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"a4d84124-baee-447b-99c9-713b3d13b205","Type":"ContainerStarted","Data":"c632903c39db540a0a12c6832254545e0ba73fe635c58b71486415f87d46560a"}
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.063941 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-869d8bc777-7mgh8"]
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.106149 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-57844b64c8-6jpl8"]
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.111641 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.116491 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.126312 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57844b64c8-6jpl8"]
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134202 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzxbr\" (UniqueName: \"kubernetes.io/projected/6b5c0550-a7fd-430e-991f-9eccf00522e2-kube-api-access-vzxbr\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134307 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-tls-certs\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134373 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-secret-key\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134495 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-combined-ca-bundle\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134558 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-scripts\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134583 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b5c0550-a7fd-430e-991f-9eccf00522e2-logs\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.134696 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-config-data\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.219367 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68dd8969dc-2vfc6"]
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.233715 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68f5655b9d-76qsp"]
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.235891 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237108 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-combined-ca-bundle\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237201 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-scripts\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237235 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b5c0550-a7fd-430e-991f-9eccf00522e2-logs\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237318 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-config-data\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237380 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzxbr\" (UniqueName: \"kubernetes.io/projected/6b5c0550-a7fd-430e-991f-9eccf00522e2-kube-api-access-vzxbr\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237406 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-tls-certs\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.237446 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-secret-key\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.239024 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-scripts\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.247209 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-tls-certs\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.247563 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-config-data\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.248192 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68f5655b9d-76qsp"]
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.248297 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b5c0550-a7fd-430e-991f-9eccf00522e2-logs\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.251621 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-combined-ca-bundle\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.254710 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-secret-key\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.275452 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzxbr\" (UniqueName: \"kubernetes.io/projected/6b5c0550-a7fd-430e-991f-9eccf00522e2-kube-api-access-vzxbr\") pod \"horizon-57844b64c8-6jpl8\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339236 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dfa56dc1-1635-454c-95e0-74fdedcf8b00-logs\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339380 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-horizon-secret-key\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339484 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfl27\" (UniqueName: \"kubernetes.io/projected/dfa56dc1-1635-454c-95e0-74fdedcf8b00-kube-api-access-vfl27\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339518 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dfa56dc1-1635-454c-95e0-74fdedcf8b00-scripts\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339573 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-horizon-tls-certs\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339609 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dfa56dc1-1635-454c-95e0-74fdedcf8b00-config-data\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.339661 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-combined-ca-bundle\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445586 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-horizon-tls-certs\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445659 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dfa56dc1-1635-454c-95e0-74fdedcf8b00-config-data\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445701 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-combined-ca-bundle\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445743 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dfa56dc1-1635-454c-95e0-74fdedcf8b00-logs\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445771 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-horizon-secret-key\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445837 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfl27\" (UniqueName: \"kubernetes.io/projected/dfa56dc1-1635-454c-95e0-74fdedcf8b00-kube-api-access-vfl27\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.445869 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dfa56dc1-1635-454c-95e0-74fdedcf8b00-scripts\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.450491 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dfa56dc1-1635-454c-95e0-74fdedcf8b00-scripts\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.451673 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dfa56dc1-1635-454c-95e0-74fdedcf8b00-logs\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.453243 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dfa56dc1-1635-454c-95e0-74fdedcf8b00-config-data\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.455524 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-horizon-tls-certs\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.458163 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-combined-ca-bundle\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.459074 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dfa56dc1-1635-454c-95e0-74fdedcf8b00-horizon-secret-key\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.473582 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfl27\" (UniqueName: \"kubernetes.io/projected/dfa56dc1-1635-454c-95e0-74fdedcf8b00-kube-api-access-vfl27\") pod \"horizon-68f5655b9d-76qsp\" (UID: \"dfa56dc1-1635-454c-95e0-74fdedcf8b00\") " pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.541050 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57844b64c8-6jpl8"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.577100 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68f5655b9d-76qsp"
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.635537 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerStarted","Data":"b6c3c8f29aa01731f355045457e1becd67b5e1ea57ddf758ea5483e25f3c40a5"}
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.638112 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"a4d84124-baee-447b-99c9-713b3d13b205","Type":"ContainerStarted","Data":"2d1c5f21784660bf9afb2e4dd942320344fe1759c608ff998a970a68989b068f"}
Jan 28 13:07:21 crc kubenswrapper[4848]: I0128 13:07:21.640619 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"b7811364-7959-428c-8be5-751c4b25f597","Type":"ContainerStarted","Data":"52e3e59dcf880a7bb4faae332b1cfae9ca93c4edbf6822ba0f58095fa0964014"}
Jan 28 13:07:22 crc kubenswrapper[4848]: I0128 13:07:22.659564 4848 generic.go:334] "Generic (PLEG): container finished" podID="b6ded25e-7c44-40d6-bf27-f23d595c4776" containerID="e1831ca876ba45efafdbcc3a2920a1ecea5c5b0cf626b909f3798d32419fcaba" exitCode=0
Jan 28 13:07:22 crc kubenswrapper[4848]: I0128 13:07:22.659640 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gszqt" event={"ID":"b6ded25e-7c44-40d6-bf27-f23d595c4776","Type":"ContainerDied","Data":"e1831ca876ba45efafdbcc3a2920a1ecea5c5b0cf626b909f3798d32419fcaba"}
Jan 28 13:07:22 crc kubenswrapper[4848]: I0128 13:07:22.983442 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77b6dfd897-mp852"
Jan 28 13:07:23 crc kubenswrapper[4848]: I0128 13:07:23.058565 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b5fd78997-2tm5l"]
Jan 28 13:07:23 crc kubenswrapper[4848]: I0128 13:07:23.058879 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" containerID="cri-o://74665a5e907557d7da26720b8da3e6482ef23c04552e5a52e296ffd9fc5b505c" gracePeriod=10
Jan 28 13:07:23 crc kubenswrapper[4848]: I0128 13:07:23.672083 4848 generic.go:334] "Generic (PLEG): container finished" podID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerID="74665a5e907557d7da26720b8da3e6482ef23c04552e5a52e296ffd9fc5b505c" exitCode=0
Jan 28 13:07:23 crc kubenswrapper[4848]: I0128 13:07:23.672422 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" event={"ID":"cf32a5d4-51d4-45a4-973f-c47bee280747","Type":"ContainerDied","Data":"74665a5e907557d7da26720b8da3e6482ef23c04552e5a52e296ffd9fc5b505c"}
Jan 28 13:07:24 crc kubenswrapper[4848]: I0128 13:07:24.350584 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: connect: connection refused"
Jan 28 13:07:32 crc kubenswrapper[4848]: E0128 13:07:32.221177 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest"
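NOTE: The dnsmasq-dns-7b5fd78997-2tm5l sequence above is an orderly rollover rather than a fault: the replacement pod dnsmasq-dns-77b6dfd897-mp852 reported readiness status="ready" at 13:07:22.983442, the old pod's container was then killed with gracePeriod=10, and the later "Probe failed ... connection refused" entry is the expected symptom of probing a container that has already exited. A sketch that tallies such probe failures per pod and probe type (regex follows the prober.go:107 line format in this log):

    #!/usr/bin/env python3
    # Count "Probe failed" entries per (pod, probe type) from kubelet logs on stdin.
    import re
    import sys
    from collections import Counter

    PROBE = re.compile(r'"Probe failed" probeType="(\w+)" pod="([^"]+)"')

    failures = Counter()
    for line in sys.stdin:
        m = PROBE.search(line)
        if m:
            failures[(m.group(2), m.group(1))] += 1

    for (pod, probe), n in failures.most_common():
        print(f"{pod} [{probe}]: {n} failure(s)")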
Jan 28 13:07:32 crc kubenswrapper[4848]: E0128 13:07:32.222148 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest"
Jan 28 13:07:32 crc kubenswrapper[4848]: E0128 13:07:32.222458 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n568h4h5fbh78h685h646h67dhbch646h68ch688h5cbhf4h669hc7h8fh5f6h665h598h9dhffh567h64fhch7fh678h586h5b8hc6h59bh7ch696q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cnnv7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-869d8bc777-7mgh8_openstack(be500c59-d797-4d7b-9b99-84a72912c669): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 13:07:32 crc kubenswrapper[4848]: E0128 13:07:32.226196 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-869d8bc777-7mgh8" podUID="be500c59-d797-4d7b-9b99-84a72912c669"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.837131 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.837752 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.838414 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c7h689h5c7h588h5fdh68fh5c5hc4h78h5f7h555h67dhdfh688h5d6h675h5fdh588h666h645h574h5f7h8bhc8h5ddh679h7dh58ch67bh5fdhcch5cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vhnb2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-68c6884989-bcdsh_openstack(84bfeb56-2dcf-420f-9939-b9486ac092ea): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.840657 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-68c6884989-bcdsh" podUID="84bfeb56-2dcf-420f-9939-b9486ac092ea"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.845778 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.845846 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.846015 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf8h64bhdfh59h65bh5dfh88h65h644h574h5d8h55fhbh555h5b5hcchb5h57dh598hd7hch545h54fhb9h577hf6h6dhf7h9chdh76h55q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lf28t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-68dd8969dc-2vfc6_openstack(b8c10aeb-b312-4c6b-aac0-5e5745785034): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 13:07:33 crc kubenswrapper[4848]: E0128 13:07:33.848372 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-68dd8969dc-2vfc6" podUID="b8c10aeb-b312-4c6b-aac0-5e5745785034"
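NOTE: The three "Unhandled Error" dumps above are one failure repeated across three horizon pods: pulling 38.102.83.20:5001/podified-master-centos10/openstack-horizon:watcher_latest was canceled mid-copy ("copying config: context canceled"), so the horizon-log container fails with ErrImagePull while the horizon container is already in ImagePullBackOff. A sketch that groups pull failures by image (heuristic, not kubelet logic: each "Error syncing pod" is attributed to the most recent "Failed to pull image" line, which holds for the back-to-back entries here):

    #!/usr/bin/env python3
    # Map each failing image to the set of pods it blocked, from stdin.
    import re
    import sys
    from collections import defaultdict

    PULL = re.compile(r'"Failed to pull image".*image="([^"]+)"')
    SYNC = re.compile(r'"Error syncing pod, skipping".*pod="([^"]+)"')

    pods_by_image = defaultdict(set)
    last_image = None
    for line in sys.stdin:
        m = PULL.search(line)
        if m:
            last_image = m.group(1)
            continue
        m = SYNC.search(line)
        if m and last_image:
            pods_by_image[last_image].add(m.group(1))

    for image, pods in pods_by_image.items():
        print(image)
        for pod in sorted(pods):
            print(f"  {pod}")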
Need to start a new one" pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.975803 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbltl\" (UniqueName: \"kubernetes.io/projected/b6ded25e-7c44-40d6-bf27-f23d595c4776-kube-api-access-gbltl\") pod \"b6ded25e-7c44-40d6-bf27-f23d595c4776\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.976046 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-config-data\") pod \"b6ded25e-7c44-40d6-bf27-f23d595c4776\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.976123 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-combined-ca-bundle\") pod \"b6ded25e-7c44-40d6-bf27-f23d595c4776\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.976207 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-credential-keys\") pod \"b6ded25e-7c44-40d6-bf27-f23d595c4776\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.976320 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-fernet-keys\") pod \"b6ded25e-7c44-40d6-bf27-f23d595c4776\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.976728 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-scripts\") pod \"b6ded25e-7c44-40d6-bf27-f23d595c4776\" (UID: \"b6ded25e-7c44-40d6-bf27-f23d595c4776\") " Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.983334 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6ded25e-7c44-40d6-bf27-f23d595c4776-kube-api-access-gbltl" (OuterVolumeSpecName: "kube-api-access-gbltl") pod "b6ded25e-7c44-40d6-bf27-f23d595c4776" (UID: "b6ded25e-7c44-40d6-bf27-f23d595c4776"). InnerVolumeSpecName "kube-api-access-gbltl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.984684 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-scripts" (OuterVolumeSpecName: "scripts") pod "b6ded25e-7c44-40d6-bf27-f23d595c4776" (UID: "b6ded25e-7c44-40d6-bf27-f23d595c4776"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:33 crc kubenswrapper[4848]: I0128 13:07:33.985035 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b6ded25e-7c44-40d6-bf27-f23d595c4776" (UID: "b6ded25e-7c44-40d6-bf27-f23d595c4776"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.001120 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b6ded25e-7c44-40d6-bf27-f23d595c4776" (UID: "b6ded25e-7c44-40d6-bf27-f23d595c4776"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.005496 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-config-data" (OuterVolumeSpecName: "config-data") pod "b6ded25e-7c44-40d6-bf27-f23d595c4776" (UID: "b6ded25e-7c44-40d6-bf27-f23d595c4776"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.009895 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6ded25e-7c44-40d6-bf27-f23d595c4776" (UID: "b6ded25e-7c44-40d6-bf27-f23d595c4776"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.079133 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbltl\" (UniqueName: \"kubernetes.io/projected/b6ded25e-7c44-40d6-bf27-f23d595c4776-kube-api-access-gbltl\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.079176 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.079186 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.079194 4848 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.079203 4848 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.079212 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6ded25e-7c44-40d6-bf27-f23d595c4776-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:34 crc kubenswrapper[4848]: E0128 13:07:34.261080 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Jan 28 13:07:34 crc kubenswrapper[4848]: E0128 13:07:34.261174 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Jan 28 13:07:34 crc 
kubenswrapper[4848]: E0128 13:07:34.261412 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:38.102.83.20:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5bbh557h5bdh694hbch5b7h5d8h5b5h95h56bh5d9h676hf4hd4h54dh59bhf5h577h5h587h86h99h8hfch58dh5c9h667h5f6h8ch654h666h68q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-plhf6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(b0ac205d-682b-4cb7-b5eb-05a9b1f710ff): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.350701 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.808375 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gszqt" event={"ID":"b6ded25e-7c44-40d6-bf27-f23d595c4776","Type":"ContainerDied","Data":"f026ef270d05e033e766b0bb71fd247e37db0a6696b6c8cba7bd914864b629c1"} Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.808454 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f026ef270d05e033e766b0bb71fd247e37db0a6696b6c8cba7bd914864b629c1" Jan 28 13:07:34 crc kubenswrapper[4848]: I0128 13:07:34.808596 4848 
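
The "Unhandled Error" block above is kuberuntime_manager dumping the full serialized Container spec when a start fails; the useful signal is at the end: ErrImagePull with "rpc error: code = Canceled desc = copying config: context canceled". The image pull was aborted because its context was canceled on the kubelet/CRI side, not rejected by the registry. A small reproduction of how a canceled context surfaces as exactly this error text (pullImage is a stand-in, not the real CRI client):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // pullImage stands in for the CRI ImageService PullImage call; the real
    // error arrives wrapped by gRPC as "rpc error: code = Canceled".
    func pullImage(ctx context.Context, image string) error {
        select {
        case <-time.After(10 * time.Second): // pretend the layer copy takes 10s
            return nil
        case <-ctx.Done():
            return fmt.Errorf("copying config: %w", ctx.Err())
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        go func() { time.Sleep(100 * time.Millisecond); cancel() }()
        err := pullImage(ctx, "38.102.83.20:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest")
        fmt.Println(err) // copying config: context canceled
    }
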
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gszqt" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.015943 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gszqt"] Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.026834 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gszqt"] Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.124013 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-x5jfk"] Jan 28 13:07:35 crc kubenswrapper[4848]: E0128 13:07:35.124708 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6ded25e-7c44-40d6-bf27-f23d595c4776" containerName="keystone-bootstrap" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.124731 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6ded25e-7c44-40d6-bf27-f23d595c4776" containerName="keystone-bootstrap" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.125071 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6ded25e-7c44-40d6-bf27-f23d595c4776" containerName="keystone-bootstrap" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.126150 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.135831 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.135947 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.136128 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.135951 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-x5jfk"] Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.136917 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.142326 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ffggt" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.214519 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-scripts\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.214622 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-combined-ca-bundle\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.214655 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-config-data\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc 
kubenswrapper[4848]: I0128 13:07:35.214714 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-credential-keys\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.214896 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgbtq\" (UniqueName: \"kubernetes.io/projected/a5436ced-61f3-4be7-ac99-690c2b58939d-kube-api-access-zgbtq\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.214946 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-fernet-keys\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.317676 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgbtq\" (UniqueName: \"kubernetes.io/projected/a5436ced-61f3-4be7-ac99-690c2b58939d-kube-api-access-zgbtq\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.317755 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-fernet-keys\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.317875 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-scripts\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.317923 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-combined-ca-bundle\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.317947 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-config-data\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.317990 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-credential-keys\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.324833 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-scripts\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.326773 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-combined-ca-bundle\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.327777 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-credential-keys\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.329923 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-fernet-keys\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.337002 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-config-data\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.337684 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgbtq\" (UniqueName: \"kubernetes.io/projected/a5436ced-61f3-4be7-ac99-690c2b58939d-kube-api-access-zgbtq\") pod \"keystone-bootstrap-x5jfk\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") " pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:35 crc kubenswrapper[4848]: I0128 13:07:35.459939 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-x5jfk" Jan 28 13:07:36 crc kubenswrapper[4848]: I0128 13:07:36.864003 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6ded25e-7c44-40d6-bf27-f23d595c4776" path="/var/lib/kubelet/pods/b6ded25e-7c44-40d6-bf27-f23d595c4776/volumes" Jan 28 13:07:37 crc kubenswrapper[4848]: I0128 13:07:37.925208 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:07:37 crc kubenswrapper[4848]: I0128 13:07:37.925310 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:07:37 crc kubenswrapper[4848]: I0128 13:07:37.925365 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:07:37 crc kubenswrapper[4848]: I0128 13:07:37.926468 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"549672e6f36f329f8d879da83cfb4972802790c5bf74c410e2275cf97e32bb6c"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:07:37 crc kubenswrapper[4848]: I0128 13:07:37.926560 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://549672e6f36f329f8d879da83cfb4972802790c5bf74c410e2275cf97e32bb6c" gracePeriod=600 Jan 28 13:07:39 crc kubenswrapper[4848]: I0128 13:07:39.351152 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Jan 28 13:07:39 crc kubenswrapper[4848]: I0128 13:07:39.351890 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:07:39 crc kubenswrapper[4848]: I0128 13:07:39.862151 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="549672e6f36f329f8d879da83cfb4972802790c5bf74c410e2275cf97e32bb6c" exitCode=0 Jan 28 13:07:39 crc kubenswrapper[4848]: I0128 13:07:39.862211 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"549672e6f36f329f8d879da83cfb4972802790c5bf74c410e2275cf97e32bb6c"} Jan 28 13:07:39 crc kubenswrapper[4848]: I0128 13:07:39.862280 4848 scope.go:117] "RemoveContainer" containerID="875a982e7db5cc44931d699a4c51480a5860a252ccb155a317028cb1da4c99e1" Jan 28 13:07:44 crc kubenswrapper[4848]: I0128 13:07:44.352313 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" 
podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Jan 28 13:07:46 crc kubenswrapper[4848]: E0128 13:07:46.332982 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-glance-api:watcher_latest" Jan 28 13:07:46 crc kubenswrapper[4848]: E0128 13:07:46.333060 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-glance-api:watcher_latest" Jan 28 13:07:46 crc kubenswrapper[4848]: E0128 13:07:46.333307 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:38.102.83.20:5001/podified-master-centos10/openstack-glance-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kgc8n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-nhdf9_openstack(c9967e9e-d256-4645-be9b-3f3789db9f05): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:07:46 crc kubenswrapper[4848]: E0128 13:07:46.334487 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-nhdf9" podUID="c9967e9e-d256-4645-be9b-3f3789db9f05" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.416334 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.422198 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bfeb56-2dcf-420f-9939-b9486ac092ea-logs\") pod \"84bfeb56-2dcf-420f-9939-b9486ac092ea\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.422379 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-config-data\") pod \"84bfeb56-2dcf-420f-9939-b9486ac092ea\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.422411 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/84bfeb56-2dcf-420f-9939-b9486ac092ea-horizon-secret-key\") pod \"84bfeb56-2dcf-420f-9939-b9486ac092ea\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.422438 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhnb2\" (UniqueName: \"kubernetes.io/projected/84bfeb56-2dcf-420f-9939-b9486ac092ea-kube-api-access-vhnb2\") pod \"84bfeb56-2dcf-420f-9939-b9486ac092ea\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.422512 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-scripts\") pod \"84bfeb56-2dcf-420f-9939-b9486ac092ea\" (UID: \"84bfeb56-2dcf-420f-9939-b9486ac092ea\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.422618 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84bfeb56-2dcf-420f-9939-b9486ac092ea-logs" (OuterVolumeSpecName: "logs") pod "84bfeb56-2dcf-420f-9939-b9486ac092ea" (UID: "84bfeb56-2dcf-420f-9939-b9486ac092ea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.423114 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-scripts" (OuterVolumeSpecName: "scripts") pod "84bfeb56-2dcf-420f-9939-b9486ac092ea" (UID: "84bfeb56-2dcf-420f-9939-b9486ac092ea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.423211 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-config-data" (OuterVolumeSpecName: "config-data") pod "84bfeb56-2dcf-420f-9939-b9486ac092ea" (UID: "84bfeb56-2dcf-420f-9939-b9486ac092ea"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.423353 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bfeb56-2dcf-420f-9939-b9486ac092ea-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.423380 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.423392 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84bfeb56-2dcf-420f-9939-b9486ac092ea-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.428519 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.429489 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84bfeb56-2dcf-420f-9939-b9486ac092ea-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "84bfeb56-2dcf-420f-9939-b9486ac092ea" (UID: "84bfeb56-2dcf-420f-9939-b9486ac092ea"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.430160 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84bfeb56-2dcf-420f-9939-b9486ac092ea-kube-api-access-vhnb2" (OuterVolumeSpecName: "kube-api-access-vhnb2") pod "84bfeb56-2dcf-420f-9939-b9486ac092ea" (UID: "84bfeb56-2dcf-420f-9939-b9486ac092ea"). InnerVolumeSpecName "kube-api-access-vhnb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.506709 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.524764 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-nb\") pod \"cf32a5d4-51d4-45a4-973f-c47bee280747\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.524966 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b8c10aeb-b312-4c6b-aac0-5e5745785034-horizon-secret-key\") pod \"b8c10aeb-b312-4c6b-aac0-5e5745785034\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525032 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqslv\" (UniqueName: \"kubernetes.io/projected/cf32a5d4-51d4-45a4-973f-c47bee280747-kube-api-access-tqslv\") pod \"cf32a5d4-51d4-45a4-973f-c47bee280747\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525053 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-swift-storage-0\") pod \"cf32a5d4-51d4-45a4-973f-c47bee280747\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525126 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-config-data\") pod \"b8c10aeb-b312-4c6b-aac0-5e5745785034\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525469 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-sb\") pod \"cf32a5d4-51d4-45a4-973f-c47bee280747\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525506 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-scripts\") pod \"b8c10aeb-b312-4c6b-aac0-5e5745785034\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525555 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-config\") pod \"cf32a5d4-51d4-45a4-973f-c47bee280747\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525637 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-svc\") pod \"cf32a5d4-51d4-45a4-973f-c47bee280747\" (UID: \"cf32a5d4-51d4-45a4-973f-c47bee280747\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525752 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8c10aeb-b312-4c6b-aac0-5e5745785034-logs\") pod \"b8c10aeb-b312-4c6b-aac0-5e5745785034\" (UID: 
\"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.525851 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf28t\" (UniqueName: \"kubernetes.io/projected/b8c10aeb-b312-4c6b-aac0-5e5745785034-kube-api-access-lf28t\") pod \"b8c10aeb-b312-4c6b-aac0-5e5745785034\" (UID: \"b8c10aeb-b312-4c6b-aac0-5e5745785034\") " Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.526514 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-config-data" (OuterVolumeSpecName: "config-data") pod "b8c10aeb-b312-4c6b-aac0-5e5745785034" (UID: "b8c10aeb-b312-4c6b-aac0-5e5745785034"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.526875 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.526901 4848 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/84bfeb56-2dcf-420f-9939-b9486ac092ea-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.526914 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhnb2\" (UniqueName: \"kubernetes.io/projected/84bfeb56-2dcf-420f-9939-b9486ac092ea-kube-api-access-vhnb2\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.528004 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8c10aeb-b312-4c6b-aac0-5e5745785034-logs" (OuterVolumeSpecName: "logs") pod "b8c10aeb-b312-4c6b-aac0-5e5745785034" (UID: "b8c10aeb-b312-4c6b-aac0-5e5745785034"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.529307 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-scripts" (OuterVolumeSpecName: "scripts") pod "b8c10aeb-b312-4c6b-aac0-5e5745785034" (UID: "b8c10aeb-b312-4c6b-aac0-5e5745785034"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.533022 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8c10aeb-b312-4c6b-aac0-5e5745785034-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b8c10aeb-b312-4c6b-aac0-5e5745785034" (UID: "b8c10aeb-b312-4c6b-aac0-5e5745785034"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.535889 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8c10aeb-b312-4c6b-aac0-5e5745785034-kube-api-access-lf28t" (OuterVolumeSpecName: "kube-api-access-lf28t") pod "b8c10aeb-b312-4c6b-aac0-5e5745785034" (UID: "b8c10aeb-b312-4c6b-aac0-5e5745785034"). InnerVolumeSpecName "kube-api-access-lf28t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.537566 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf32a5d4-51d4-45a4-973f-c47bee280747-kube-api-access-tqslv" (OuterVolumeSpecName: "kube-api-access-tqslv") pod "cf32a5d4-51d4-45a4-973f-c47bee280747" (UID: "cf32a5d4-51d4-45a4-973f-c47bee280747"). InnerVolumeSpecName "kube-api-access-tqslv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.582605 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cf32a5d4-51d4-45a4-973f-c47bee280747" (UID: "cf32a5d4-51d4-45a4-973f-c47bee280747"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.583395 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-config" (OuterVolumeSpecName: "config") pod "cf32a5d4-51d4-45a4-973f-c47bee280747" (UID: "cf32a5d4-51d4-45a4-973f-c47bee280747"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.604961 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cf32a5d4-51d4-45a4-973f-c47bee280747" (UID: "cf32a5d4-51d4-45a4-973f-c47bee280747"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.608492 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cf32a5d4-51d4-45a4-973f-c47bee280747" (UID: "cf32a5d4-51d4-45a4-973f-c47bee280747"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.627938 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cf32a5d4-51d4-45a4-973f-c47bee280747" (UID: "cf32a5d4-51d4-45a4-973f-c47bee280747"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628648 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8c10aeb-b312-4c6b-aac0-5e5745785034-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628667 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf28t\" (UniqueName: \"kubernetes.io/projected/b8c10aeb-b312-4c6b-aac0-5e5745785034-kube-api-access-lf28t\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628679 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628690 4848 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b8c10aeb-b312-4c6b-aac0-5e5745785034-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628700 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqslv\" (UniqueName: \"kubernetes.io/projected/cf32a5d4-51d4-45a4-973f-c47bee280747-kube-api-access-tqslv\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628710 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628718 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628727 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b8c10aeb-b312-4c6b-aac0-5e5745785034-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628737 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.628745 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf32a5d4-51d4-45a4-973f-c47bee280747-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.932891 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68dd8969dc-2vfc6" event={"ID":"b8c10aeb-b312-4c6b-aac0-5e5745785034","Type":"ContainerDied","Data":"6c774f16055c3bdd1af09687806cd637880bfce8bd043d9943ddc7177c6428db"} Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.933036 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68dd8969dc-2vfc6" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.936832 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68c6884989-bcdsh" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.936919 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c6884989-bcdsh" event={"ID":"84bfeb56-2dcf-420f-9939-b9486ac092ea","Type":"ContainerDied","Data":"92c15b095b852388e90083e6547b5862d7cd79c73468b71a417b3d93606c56fb"} Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.940651 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" Jan 28 13:07:46 crc kubenswrapper[4848]: I0128 13:07:46.940796 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" event={"ID":"cf32a5d4-51d4-45a4-973f-c47bee280747","Type":"ContainerDied","Data":"e950c07b94ee14bd3834d01f471154cb6b6aa875de3e1bd814ac01e6ab3afb8a"} Jan 28 13:07:46 crc kubenswrapper[4848]: E0128 13:07:46.944030 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-glance-api:watcher_latest\\\"\"" pod="openstack/glance-db-sync-nhdf9" podUID="c9967e9e-d256-4645-be9b-3f3789db9f05" Jan 28 13:07:47 crc kubenswrapper[4848]: I0128 13:07:47.032137 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68dd8969dc-2vfc6"] Jan 28 13:07:47 crc kubenswrapper[4848]: I0128 13:07:47.042235 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68dd8969dc-2vfc6"] Jan 28 13:07:47 crc kubenswrapper[4848]: I0128 13:07:47.052727 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b5fd78997-2tm5l"] Jan 28 13:07:47 crc kubenswrapper[4848]: I0128 13:07:47.069962 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b5fd78997-2tm5l"] Jan 28 13:07:47 crc kubenswrapper[4848]: I0128 13:07:47.091714 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68c6884989-bcdsh"] Jan 28 13:07:47 crc kubenswrapper[4848]: I0128 13:07:47.099745 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68c6884989-bcdsh"] Jan 28 13:07:48 crc kubenswrapper[4848]: I0128 13:07:48.865276 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84bfeb56-2dcf-420f-9939-b9486ac092ea" path="/var/lib/kubelet/pods/84bfeb56-2dcf-420f-9939-b9486ac092ea/volumes" Jan 28 13:07:48 crc kubenswrapper[4848]: I0128 13:07:48.865995 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8c10aeb-b312-4c6b-aac0-5e5745785034" path="/var/lib/kubelet/pods/b8c10aeb-b312-4c6b-aac0-5e5745785034/volumes" Jan 28 13:07:48 crc kubenswrapper[4848]: I0128 13:07:48.866641 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" path="/var/lib/kubelet/pods/cf32a5d4-51d4-45a4-973f-c47bee280747/volumes" Jan 28 13:07:49 crc kubenswrapper[4848]: I0128 13:07:49.354381 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b5fd78997-2tm5l" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Jan 28 13:07:53 crc kubenswrapper[4848]: I0128 13:07:53.021344 4848 generic.go:334] "Generic (PLEG): container finished" podID="4f39c6ec-6d59-43de-baef-a3d680b5163f" containerID="e157395a9e088032fe378f93954f217a8dcebf89352c0ad26608856fb015c7c3" 
exitCode=0 Jan 28 13:07:53 crc kubenswrapper[4848]: I0128 13:07:53.022385 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tfd9p" event={"ID":"4f39c6ec-6d59-43de-baef-a3d680b5163f","Type":"ContainerDied","Data":"e157395a9e088032fe378f93954f217a8dcebf89352c0ad26608856fb015c7c3"} Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.335334 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.400651 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnnv7\" (UniqueName: \"kubernetes.io/projected/be500c59-d797-4d7b-9b99-84a72912c669-kube-api-access-cnnv7\") pod \"be500c59-d797-4d7b-9b99-84a72912c669\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.401129 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/be500c59-d797-4d7b-9b99-84a72912c669-horizon-secret-key\") pod \"be500c59-d797-4d7b-9b99-84a72912c669\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.408905 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be500c59-d797-4d7b-9b99-84a72912c669-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "be500c59-d797-4d7b-9b99-84a72912c669" (UID: "be500c59-d797-4d7b-9b99-84a72912c669"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.409835 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be500c59-d797-4d7b-9b99-84a72912c669-kube-api-access-cnnv7" (OuterVolumeSpecName: "kube-api-access-cnnv7") pod "be500c59-d797-4d7b-9b99-84a72912c669" (UID: "be500c59-d797-4d7b-9b99-84a72912c669"). InnerVolumeSpecName "kube-api-access-cnnv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.504578 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-config-data\") pod \"be500c59-d797-4d7b-9b99-84a72912c669\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.504916 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be500c59-d797-4d7b-9b99-84a72912c669-logs\") pod \"be500c59-d797-4d7b-9b99-84a72912c669\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.504994 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-scripts\") pod \"be500c59-d797-4d7b-9b99-84a72912c669\" (UID: \"be500c59-d797-4d7b-9b99-84a72912c669\") " Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.505414 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be500c59-d797-4d7b-9b99-84a72912c669-logs" (OuterVolumeSpecName: "logs") pod "be500c59-d797-4d7b-9b99-84a72912c669" (UID: "be500c59-d797-4d7b-9b99-84a72912c669"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.505928 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-config-data" (OuterVolumeSpecName: "config-data") pod "be500c59-d797-4d7b-9b99-84a72912c669" (UID: "be500c59-d797-4d7b-9b99-84a72912c669"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.506035 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-scripts" (OuterVolumeSpecName: "scripts") pod "be500c59-d797-4d7b-9b99-84a72912c669" (UID: "be500c59-d797-4d7b-9b99-84a72912c669"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.506179 4848 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/be500c59-d797-4d7b-9b99-84a72912c669-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.506198 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be500c59-d797-4d7b-9b99-84a72912c669-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.506209 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.506218 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnnv7\" (UniqueName: \"kubernetes.io/projected/be500c59-d797-4d7b-9b99-84a72912c669-kube-api-access-cnnv7\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:54 crc kubenswrapper[4848]: I0128 13:07:54.608607 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be500c59-d797-4d7b-9b99-84a72912c669-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:54 crc kubenswrapper[4848]: E0128 13:07:54.774075 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-barbican-api:watcher_latest" Jan 28 13:07:54 crc kubenswrapper[4848]: E0128 13:07:54.774165 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-barbican-api:watcher_latest" Jan 28 13:07:54 crc kubenswrapper[4848]: E0128 13:07:54.774341 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:38.102.83.20:5001/podified-master-centos10/openstack-barbican-api:watcher_latest,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mqcss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-skhpx_openstack(a7100632-3157-40c8-9f9f-a47fcd756ca5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:07:54 crc kubenswrapper[4848]: E0128 13:07:54.776146 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-skhpx" podUID="a7100632-3157-40c8-9f9f-a47fcd756ca5" Jan 28 13:07:55 crc kubenswrapper[4848]: I0128 13:07:55.048822 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869d8bc777-7mgh8" event={"ID":"be500c59-d797-4d7b-9b99-84a72912c669","Type":"ContainerDied","Data":"dce7037e617d6e284f1683ce19f448d5fd7aa941308d77b6f504fcad94ef8466"} Jan 28 13:07:55 crc kubenswrapper[4848]: I0128 13:07:55.048847 4848 util.go:48] "No ready sandbox for pod can be found. 
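
Unlike the kolla_start-wrapped services, the barbican-db-sync container overrides its arguments to run the migration directly (bash -c "barbican-manage db upgrade") and pins a dedicated service UID/GID. Those identity fields from the spec dump, restated with the real k8s.io/api types; only fields shown in the log are filled in:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func int64p(v int64) *int64 { return &v }
    func boolp(v bool) *bool    { return &v }

    func main() {
        c := corev1.Container{
            Name:    "barbican-db-sync",
            Image:   "38.102.83.20:5001/podified-master-centos10/openstack-barbican-api:watcher_latest",
            Command: []string{"/bin/bash"},
            Args:    []string{"-c", "barbican-manage db upgrade"},
            SecurityContext: &corev1.SecurityContext{
                RunAsUser:    int64p(42403),
                RunAsGroup:   int64p(42403),
                RunAsNonRoot: boolp(true),
            },
        }
        fmt.Printf("%s runs %v %v as uid %d\n", c.Name, c.Command, c.Args, *c.SecurityContext.RunAsUser)
    }
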
Need to start a new one" pod="openstack/horizon-869d8bc777-7mgh8" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.050703 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-barbican-api:watcher_latest\\\"\"" pod="openstack/barbican-db-sync-skhpx" podUID="a7100632-3157-40c8-9f9f-a47fcd756ca5" Jan 28 13:07:55 crc kubenswrapper[4848]: I0128 13:07:55.132490 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-869d8bc777-7mgh8"] Jan 28 13:07:55 crc kubenswrapper[4848]: I0128 13:07:55.145482 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-869d8bc777-7mgh8"] Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.155968 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-watcher-decision-engine:watcher_latest" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.156042 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-watcher-decision-engine:watcher_latest" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.156284 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-decision-engine,Image:38.102.83.20:5001/podified-master-centos10/openstack-watcher-decision-engine:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68ch5d5h5fdhb7h6dh544h599h86h5dfhch5bch66h58dh54h699h546h58ch699h66fh646hdchf6h64h5bbh5dfh74h5f4h5fch67h5b6h656h7fq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-decision-engine-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/watcher,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:custom-prometheus-ca,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/prometheus/ca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h5cjn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -f -r DRST 
watcher-decision-engine],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -f -r DRST watcher-decision-engine],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42451,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -f -r DRST watcher-decision-engine],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:6,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-decision-engine-0_openstack(88151fad-4442-4d32-a675-f89f070ed086): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.157543 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.443012 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-watcher-applier:watcher_latest" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.443059 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-watcher-applier:watcher_latest" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.443200 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-applier,Image:38.102.83.20:5001/podified-master-centos10/openstack-watcher-applier:watcher_latest,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n689h599h5f6h75h567hf6h676hbbh55h7fhb6hfdhfbh89h67ch555h588hchb9h9ch54ch5bbh5c9h5d4h67dh5dch6h54h65ch54hd9hc6q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-applier-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/watcher,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ljdh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -r DRST watcher-applier],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:10,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -r DRST watcher-applier],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42451,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -r DRST watcher-applier],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:6,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-applier-0_openstack(b7811364-7959-428c-8be5-751c4b25f597): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:07:55 crc kubenswrapper[4848]: E0128 13:07:55.445453 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-applier-0" podUID="b7811364-7959-428c-8be5-751c4b25f597" Jan 28 13:07:55 crc kubenswrapper[4848]: I0128 13:07:55.968546 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57844b64c8-6jpl8"] Jan 28 13:07:56 crc 
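
Both watcher containers gate startup, liveness and readiness on the same exec probe: /usr/bin/pgrep -f -r DRST watcher-decision-engine (for the applier, pgrep -r DRST watcher-applier, without -f). Success means a matching process exists in state D, R, S or T (-r filters on process state, -f matches against the full command line); as a startup probe with FailureThreshold 6 and PeriodSeconds 10, the container gets roughly a minute to fork the service before liveness takes over. The decision-engine variant with the real k8s.io/api types (argument splitting is assumed; the log dump does not show token boundaries):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        startup := &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                Exec: &corev1.ExecAction{
                    Command: []string{"/usr/bin/pgrep", "-f", "-r", "DRST", "watcher-decision-engine"},
                },
            },
            TimeoutSeconds:   1,
            PeriodSeconds:    10,
            FailureThreshold: 6,
        }
        fmt.Println(startup.Exec.Command)
    }
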
kubenswrapper[4848]: E0128 13:07:56.058913 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-watcher-decision-engine:watcher_latest\\\"\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 13:07:56 crc kubenswrapper[4848]: E0128 13:07:56.062300 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-watcher-applier:watcher_latest\\\"\"" pod="openstack/watcher-applier-0" podUID="b7811364-7959-428c-8be5-751c4b25f597" Jan 28 13:07:56 crc kubenswrapper[4848]: I0128 13:07:56.861532 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be500c59-d797-4d7b-9b99-84a72912c669" path="/var/lib/kubelet/pods/be500c59-d797-4d7b-9b99-84a72912c669/volumes" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.071945 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tfd9p" event={"ID":"4f39c6ec-6d59-43de-baef-a3d680b5163f","Type":"ContainerDied","Data":"a43baa7b2cff21ee3f8e893ab0d4c14db2af8fc6e37736a0f38f0819e0fcf787"} Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.072000 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a43baa7b2cff21ee3f8e893ab0d4c14db2af8fc6e37736a0f38f0819e0fcf787" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.098647 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.276748 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-combined-ca-bundle\") pod \"4f39c6ec-6d59-43de-baef-a3d680b5163f\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.276829 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwwmf\" (UniqueName: \"kubernetes.io/projected/4f39c6ec-6d59-43de-baef-a3d680b5163f-kube-api-access-dwwmf\") pod \"4f39c6ec-6d59-43de-baef-a3d680b5163f\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.277013 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-config\") pod \"4f39c6ec-6d59-43de-baef-a3d680b5163f\" (UID: \"4f39c6ec-6d59-43de-baef-a3d680b5163f\") " Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.284416 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f39c6ec-6d59-43de-baef-a3d680b5163f-kube-api-access-dwwmf" (OuterVolumeSpecName: "kube-api-access-dwwmf") pod "4f39c6ec-6d59-43de-baef-a3d680b5163f" (UID: "4f39c6ec-6d59-43de-baef-a3d680b5163f"). InnerVolumeSpecName "kube-api-access-dwwmf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.306750 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-config" (OuterVolumeSpecName: "config") pod "4f39c6ec-6d59-43de-baef-a3d680b5163f" (UID: "4f39c6ec-6d59-43de-baef-a3d680b5163f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.314982 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f39c6ec-6d59-43de-baef-a3d680b5163f" (UID: "4f39c6ec-6d59-43de-baef-a3d680b5163f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:07:57 crc kubenswrapper[4848]: E0128 13:07:57.327210 4848 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Jan 28 13:07:57 crc kubenswrapper[4848]: E0128 13:07:57.327372 4848 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.20:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Jan 28 13:07:57 crc kubenswrapper[4848]: E0128 13:07:57.327598 4848 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:38.102.83.20:5001/podified-master-centos10/openstack-cinder-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tnb4r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-pdmlr_openstack(ceb3076d-8232-44f5-8184-d727ef5c2943): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 13:07:57 crc kubenswrapper[4848]: E0128 13:07:57.329144 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-pdmlr" podUID="ceb3076d-8232-44f5-8184-d727ef5c2943" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.379836 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.379874 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39c6ec-6d59-43de-baef-a3d680b5163f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:07:57 crc kubenswrapper[4848]: I0128 13:07:57.379887 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwwmf\" (UniqueName: \"kubernetes.io/projected/4f39c6ec-6d59-43de-baef-a3d680b5163f-kube-api-access-dwwmf\") on node \"crc\" DevicePath \"\"" Jan 28 
13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.088851 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tfd9p" Jan 28 13:07:58 crc kubenswrapper[4848]: E0128 13:07:58.092644 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.20:5001/podified-master-centos10/openstack-cinder-api:watcher_latest\\\"\"" pod="openstack/cinder-db-sync-pdmlr" podUID="ceb3076d-8232-44f5-8184-d727ef5c2943" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.446130 4848 scope.go:117] "RemoveContainer" containerID="74665a5e907557d7da26720b8da3e6482ef23c04552e5a52e296ffd9fc5b505c" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.535024 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64b6d8556f-wwmfc"] Jan 28 13:07:58 crc kubenswrapper[4848]: E0128 13:07:58.536065 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.536085 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" Jan 28 13:07:58 crc kubenswrapper[4848]: E0128 13:07:58.536100 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="init" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.536106 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="init" Jan 28 13:07:58 crc kubenswrapper[4848]: E0128 13:07:58.536113 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f39c6ec-6d59-43de-baef-a3d680b5163f" containerName="neutron-db-sync" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.536120 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f39c6ec-6d59-43de-baef-a3d680b5163f" containerName="neutron-db-sync" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.536337 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf32a5d4-51d4-45a4-973f-c47bee280747" containerName="dnsmasq-dns" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.536359 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f39c6ec-6d59-43de-baef-a3d680b5163f" containerName="neutron-db-sync" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.540962 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.569145 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64b6d8556f-wwmfc"] Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.638301 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5b675789b4-dl5kz"] Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.647798 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5b675789b4-dl5kz"] Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.649912 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.658538 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-29l4b" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.658851 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.659003 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.659147 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.718974 4848 scope.go:117] "RemoveContainer" containerID="fd7f250bd216ccffed2c106868466a0ceaa9641fd6bb4dfe71e7f3e834f04b33" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.721369 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-nb\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.721447 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-config\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.721490 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-sb\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.725508 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-svc\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.725575 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-swift-storage-0\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.725616 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2w92\" (UniqueName: \"kubernetes.io/projected/14c72ce0-d825-43de-90d0-42dc10f55471-kube-api-access-v2w92\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.828560 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-httpd-config\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.829087 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-combined-ca-bundle\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.829620 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-nb\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.829827 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-ovndb-tls-certs\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.829969 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-config\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.830096 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sfzq\" (UniqueName: \"kubernetes.io/projected/016c28ae-9306-4dd5-a68d-d4dd124b0f79-kube-api-access-5sfzq\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.830133 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-sb\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.830288 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-svc\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.830340 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-swift-storage-0\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.830394 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2w92\" (UniqueName: 
\"kubernetes.io/projected/14c72ce0-d825-43de-90d0-42dc10f55471-kube-api-access-v2w92\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.830502 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-config\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.832054 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-svc\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.832801 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-sb\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.833712 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-config\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.833994 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-swift-storage-0\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.834838 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-nb\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.867965 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2w92\" (UniqueName: \"kubernetes.io/projected/14c72ce0-d825-43de-90d0-42dc10f55471-kube-api-access-v2w92\") pod \"dnsmasq-dns-64b6d8556f-wwmfc\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.972879 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-httpd-config\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.973517 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-combined-ca-bundle\") pod \"neutron-5b675789b4-dl5kz\" 
(UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.973624 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-ovndb-tls-certs\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.973769 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sfzq\" (UniqueName: \"kubernetes.io/projected/016c28ae-9306-4dd5-a68d-d4dd124b0f79-kube-api-access-5sfzq\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.973958 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-config\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.982462 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-httpd-config\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.984389 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-config\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:58 crc kubenswrapper[4848]: I0128 13:07:58.991072 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-ovndb-tls-certs\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.030201 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.037613 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sfzq\" (UniqueName: \"kubernetes.io/projected/016c28ae-9306-4dd5-a68d-d4dd124b0f79-kube-api-access-5sfzq\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.048445 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-combined-ca-bundle\") pod \"neutron-5b675789b4-dl5kz\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.241469 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68f5655b9d-76qsp"] Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.253607 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"a4d84124-baee-447b-99c9-713b3d13b205","Type":"ContainerStarted","Data":"95c25c6e224a4ad522599d923fdaf50763a91aaa01f1ab2e53c8cb288efb88b7"} Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.254397 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.258595 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.159:9322/\": dial tcp 10.217.0.159:9322: connect: connection refused" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.260292 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57844b64c8-6jpl8" event={"ID":"6b5c0550-a7fd-430e-991f-9eccf00522e2","Type":"ContainerStarted","Data":"5273fc980b21ff2ebc8243726ebd7b41a84d3336af259175b7da40789872771b"} Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.351294 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.391071 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=41.391042843 podStartE2EDuration="41.391042843s" podCreationTimestamp="2026-01-28 13:07:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:07:59.304841512 +0000 UTC m=+1306.217058560" watchObservedRunningTime="2026-01-28 13:07:59.391042843 +0000 UTC m=+1306.303259881" Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.392831 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-x5jfk"] Jan 28 13:07:59 crc kubenswrapper[4848]: W0128 13:07:59.426367 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5436ced_61f3_4be7_ac99_690c2b58939d.slice/crio-04ad76e89c822e3eb5ee99f80cfe3daba7173e3947c1988c017b0d5a28cc2098 WatchSource:0}: Error finding container 04ad76e89c822e3eb5ee99f80cfe3daba7173e3947c1988c017b0d5a28cc2098: Status 404 returned error can't find the container with id 04ad76e89c822e3eb5ee99f80cfe3daba7173e3947c1988c017b0d5a28cc2098 Jan 28 13:07:59 crc kubenswrapper[4848]: I0128 13:07:59.828608 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64b6d8556f-wwmfc"] Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.137604 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5b675789b4-dl5kz"] Jan 28 13:08:00 crc kubenswrapper[4848]: W0128 13:08:00.156500 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod016c28ae_9306_4dd5_a68d_d4dd124b0f79.slice/crio-41559dcff7a4d10fc946e1decc6473254f15749b7a5c6c530d3847a1dff82cc0 WatchSource:0}: Error finding container 41559dcff7a4d10fc946e1decc6473254f15749b7a5c6c530d3847a1dff82cc0: Status 404 returned error can't find the container with id 41559dcff7a4d10fc946e1decc6473254f15749b7a5c6c530d3847a1dff82cc0 Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.318394 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" event={"ID":"14c72ce0-d825-43de-90d0-42dc10f55471","Type":"ContainerStarted","Data":"a80d2557e141bb8f4623507dff3fad81c40bd0a0ab1562521c2be73a44458fad"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.331673 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68f5655b9d-76qsp" event={"ID":"dfa56dc1-1635-454c-95e0-74fdedcf8b00","Type":"ContainerStarted","Data":"570b869fd7919a434974d95ba9a09cb7c6cdac5ee3ddeeb28263d9bd16f221a9"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.331818 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68f5655b9d-76qsp" event={"ID":"dfa56dc1-1635-454c-95e0-74fdedcf8b00","Type":"ContainerStarted","Data":"4ad29c45beee5bcc0a86a84ed91ad585be7dbb3d7d2a6c38eceba07a4568d2b0"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.350720 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57844b64c8-6jpl8" event={"ID":"6b5c0550-a7fd-430e-991f-9eccf00522e2","Type":"ContainerStarted","Data":"efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.350795 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/horizon-57844b64c8-6jpl8" event={"ID":"6b5c0550-a7fd-430e-991f-9eccf00522e2","Type":"ContainerStarted","Data":"f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.384936 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x5jfk" event={"ID":"a5436ced-61f3-4be7-ac99-690c2b58939d","Type":"ContainerStarted","Data":"27ab91dc319aa5db65d5a398ca8f1941ecf0a581aa4710d2f4dfca8152889cc4"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.385004 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x5jfk" event={"ID":"a5436ced-61f3-4be7-ac99-690c2b58939d","Type":"ContainerStarted","Data":"04ad76e89c822e3eb5ee99f80cfe3daba7173e3947c1988c017b0d5a28cc2098"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.394814 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-57844b64c8-6jpl8" podStartSLOduration=38.897833499 podStartE2EDuration="39.394784533s" podCreationTimestamp="2026-01-28 13:07:21 +0000 UTC" firstStartedPulling="2026-01-28 13:07:58.446193738 +0000 UTC m=+1305.358410776" lastFinishedPulling="2026-01-28 13:07:58.943144772 +0000 UTC m=+1305.855361810" observedRunningTime="2026-01-28 13:08:00.379475191 +0000 UTC m=+1307.291692239" watchObservedRunningTime="2026-01-28 13:08:00.394784533 +0000 UTC m=+1307.307001571" Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.411563 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"f2e69a4a3785c5d66035fd792a22c202c11766e978faf8a57dcccebf228af87c"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.420533 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-x5jfk" podStartSLOduration=25.420512004 podStartE2EDuration="25.420512004s" podCreationTimestamp="2026-01-28 13:07:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:00.418702344 +0000 UTC m=+1307.330919402" watchObservedRunningTime="2026-01-28 13:08:00.420512004 +0000 UTC m=+1307.332729042" Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.420594 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b675789b4-dl5kz" event={"ID":"016c28ae-9306-4dd5-a68d-d4dd124b0f79","Type":"ContainerStarted","Data":"41559dcff7a4d10fc946e1decc6473254f15749b7a5c6c530d3847a1dff82cc0"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.430289 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerStarted","Data":"9b5f576212402a219c02a2fc8cb5e5921b83df0b4d58e213d3d6eb28e9e919ac"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.443379 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-x6pft" event={"ID":"88dde3f6-891e-49d7-a24c-575d166ec790","Type":"ContainerStarted","Data":"a16564a15e676ed80deeabd813eae3942cd683b1f1690fbbf131e6855a92a9ae"} Jan 28 13:08:00 crc kubenswrapper[4848]: I0128 13:08:00.526134 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-x6pft" podStartSLOduration=6.068462963 podStartE2EDuration="49.52610425s" podCreationTimestamp="2026-01-28 13:07:11 
+0000 UTC" firstStartedPulling="2026-01-28 13:07:13.537367474 +0000 UTC m=+1260.449584512" lastFinishedPulling="2026-01-28 13:07:56.995008721 +0000 UTC m=+1303.907225799" observedRunningTime="2026-01-28 13:08:00.472942842 +0000 UTC m=+1307.385159880" watchObservedRunningTime="2026-01-28 13:08:00.52610425 +0000 UTC m=+1307.438321288" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.326431 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-988d7f849-l8xdb"] Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.328646 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.345132 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.345432 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.355307 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-988d7f849-l8xdb"] Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.448628 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-combined-ca-bundle\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.448690 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-557jq\" (UniqueName: \"kubernetes.io/projected/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-kube-api-access-557jq\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.448724 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-config\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.448790 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-ovndb-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.448956 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-internal-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.449004 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-public-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " 
pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.449050 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-httpd-config\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.469836 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68f5655b9d-76qsp" event={"ID":"dfa56dc1-1635-454c-95e0-74fdedcf8b00","Type":"ContainerStarted","Data":"969b5a00c25eead123ad29626e3f99465fb651ddb3ed0dddfd5da3020ffab39c"} Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.476736 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nhdf9" event={"ID":"c9967e9e-d256-4645-be9b-3f3789db9f05","Type":"ContainerStarted","Data":"7900f694b535726a65008368f978830ffccacd4c59b079d1ca8de394b21b1a75"} Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.487645 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b675789b4-dl5kz" event={"ID":"016c28ae-9306-4dd5-a68d-d4dd124b0f79","Type":"ContainerStarted","Data":"32e6d18168dff15a69d817ec29d8f37df1abadb68fe0ad9ba23143e4921a13ed"} Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.487792 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b675789b4-dl5kz" event={"ID":"016c28ae-9306-4dd5-a68d-d4dd124b0f79","Type":"ContainerStarted","Data":"2d2ecde7461e221c7d8d3a2b1caaef35914587f90bd06e2ccad8edacc29c6cac"} Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.488764 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.504515 4848 generic.go:334] "Generic (PLEG): container finished" podID="14c72ce0-d825-43de-90d0-42dc10f55471" containerID="c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599" exitCode=0 Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.507298 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" event={"ID":"14c72ce0-d825-43de-90d0-42dc10f55471","Type":"ContainerDied","Data":"c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599"} Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.512578 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-68f5655b9d-76qsp" podStartSLOduration=40.512549704 podStartE2EDuration="40.512549704s" podCreationTimestamp="2026-01-28 13:07:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:01.504713657 +0000 UTC m=+1308.416930705" watchObservedRunningTime="2026-01-28 13:08:01.512549704 +0000 UTC m=+1308.424766742" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.548553 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-57844b64c8-6jpl8" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.548632 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-57844b64c8-6jpl8" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.557714 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-public-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.557886 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-httpd-config\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.558134 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-combined-ca-bundle\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.558820 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-557jq\" (UniqueName: \"kubernetes.io/projected/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-kube-api-access-557jq\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.558864 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-config\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.559059 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-ovndb-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.566647 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-internal-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.575620 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-ovndb-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.579287 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68f5655b9d-76qsp" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.579380 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68f5655b9d-76qsp" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.593823 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-config\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 
13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.596535 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-httpd-config\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.597888 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-nhdf9" podStartSLOduration=3.518296147 podStartE2EDuration="47.59785559s" podCreationTimestamp="2026-01-28 13:07:14 +0000 UTC" firstStartedPulling="2026-01-28 13:07:15.881675057 +0000 UTC m=+1262.793892095" lastFinishedPulling="2026-01-28 13:07:59.9612345 +0000 UTC m=+1306.873451538" observedRunningTime="2026-01-28 13:08:01.559963913 +0000 UTC m=+1308.472180971" watchObservedRunningTime="2026-01-28 13:08:01.59785559 +0000 UTC m=+1308.510072628" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.598335 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-combined-ca-bundle\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.600842 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-557jq\" (UniqueName: \"kubernetes.io/projected/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-kube-api-access-557jq\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.603084 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-internal-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.609913 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-public-tls-certs\") pod \"neutron-988d7f849-l8xdb\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.639622 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5b675789b4-dl5kz" podStartSLOduration=3.639584122 podStartE2EDuration="3.639584122s" podCreationTimestamp="2026-01-28 13:07:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:01.593849939 +0000 UTC m=+1308.506066997" watchObservedRunningTime="2026-01-28 13:08:01.639584122 +0000 UTC m=+1308.551801190" Jan 28 13:08:01 crc kubenswrapper[4848]: I0128 13:08:01.651085 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:02 crc kubenswrapper[4848]: I0128 13:08:02.305949 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-988d7f849-l8xdb"] Jan 28 13:08:02 crc kubenswrapper[4848]: I0128 13:08:02.526208 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-988d7f849-l8xdb" event={"ID":"6964a7ca-3376-4df2-8a5d-bb63e731b0a8","Type":"ContainerStarted","Data":"0c0061a76299f6f5c0503c3dfab37089a8ad40dd2464d443216a9249941a2781"} Jan 28 13:08:04 crc kubenswrapper[4848]: I0128 13:08:04.245764 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.484686 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.159:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.562355 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" event={"ID":"14c72ce0-d825-43de-90d0-42dc10f55471","Type":"ContainerStarted","Data":"343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9"} Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.562485 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.567318 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-988d7f849-l8xdb" event={"ID":"6964a7ca-3376-4df2-8a5d-bb63e731b0a8","Type":"ContainerStarted","Data":"9398e20b03b8ae080f593bd5259c6dc97990067a5daec6123799379e953cdc10"} Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.567382 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-988d7f849-l8xdb" event={"ID":"6964a7ca-3376-4df2-8a5d-bb63e731b0a8","Type":"ContainerStarted","Data":"e37496aa995c774aa528b58d0551e096b7b42b5a7f81dfe53a540ad71e104029"} Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.567517 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.597921 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" podStartSLOduration=7.597889802 podStartE2EDuration="7.597889802s" podCreationTimestamp="2026-01-28 13:07:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:05.580825781 +0000 UTC m=+1312.493042819" watchObservedRunningTime="2026-01-28 13:08:05.597889802 +0000 UTC m=+1312.510106840" Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.617876 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-988d7f849-l8xdb" podStartSLOduration=4.617853194 podStartE2EDuration="4.617853194s" podCreationTimestamp="2026-01-28 13:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:05.607765395 +0000 UTC m=+1312.519982443" watchObservedRunningTime="2026-01-28 13:08:05.617853194 +0000 UTC m=+1312.530070232" Jan 28 13:08:05 crc kubenswrapper[4848]: I0128 13:08:05.947139 4848 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 28 13:08:08 crc kubenswrapper[4848]: I0128 13:08:08.603291 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerStarted","Data":"dfb5932cc98a5f499a97317d3e120a3e94e9600f558651bce8cad150d220c057"} Jan 28 13:08:08 crc kubenswrapper[4848]: I0128 13:08:08.605975 4848 generic.go:334] "Generic (PLEG): container finished" podID="88dde3f6-891e-49d7-a24c-575d166ec790" containerID="a16564a15e676ed80deeabd813eae3942cd683b1f1690fbbf131e6855a92a9ae" exitCode=0 Jan 28 13:08:08 crc kubenswrapper[4848]: I0128 13:08:08.606036 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-x6pft" event={"ID":"88dde3f6-891e-49d7-a24c-575d166ec790","Type":"ContainerDied","Data":"a16564a15e676ed80deeabd813eae3942cd683b1f1690fbbf131e6855a92a9ae"} Jan 28 13:08:08 crc kubenswrapper[4848]: I0128 13:08:08.609391 4848 generic.go:334] "Generic (PLEG): container finished" podID="a5436ced-61f3-4be7-ac99-690c2b58939d" containerID="27ab91dc319aa5db65d5a398ca8f1941ecf0a581aa4710d2f4dfca8152889cc4" exitCode=0 Jan 28 13:08:08 crc kubenswrapper[4848]: I0128 13:08:08.609449 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x5jfk" event={"ID":"a5436ced-61f3-4be7-ac99-690c2b58939d","Type":"ContainerDied","Data":"27ab91dc319aa5db65d5a398ca8f1941ecf0a581aa4710d2f4dfca8152889cc4"} Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.031848 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.159088 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77b6dfd897-mp852"] Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.159892 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerName="dnsmasq-dns" containerID="cri-o://1092527bac3025c100fadaa6eeba10cdf333a204f1d232dfc529425a221b958d" gracePeriod=10 Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.246768 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.254787 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.628636 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"b7811364-7959-428c-8be5-751c4b25f597","Type":"ContainerStarted","Data":"23076038af305854194c22649f84d10363842cbcb61c3f80ce62c541a3ad1d2a"} Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.637790 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-skhpx" event={"ID":"a7100632-3157-40c8-9f9f-a47fcd756ca5","Type":"ContainerStarted","Data":"111d97c73c4dca61f6d1fe05e1666b02b7e45f4e5913df0033c11304d7d91529"} Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.657303 4848 generic.go:334] "Generic (PLEG): container finished" podID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerID="1092527bac3025c100fadaa6eeba10cdf333a204f1d232dfc529425a221b958d" exitCode=0 Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.659120 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-77b6dfd897-mp852" event={"ID":"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0","Type":"ContainerDied","Data":"1092527bac3025c100fadaa6eeba10cdf333a204f1d232dfc529425a221b958d"} Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.669970 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.670953 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=3.314400477 podStartE2EDuration="51.670923571s" podCreationTimestamp="2026-01-28 13:07:18 +0000 UTC" firstStartedPulling="2026-01-28 13:07:20.614392515 +0000 UTC m=+1267.526609553" lastFinishedPulling="2026-01-28 13:08:08.970915609 +0000 UTC m=+1315.883132647" observedRunningTime="2026-01-28 13:08:09.655766173 +0000 UTC m=+1316.567983221" watchObservedRunningTime="2026-01-28 13:08:09.670923571 +0000 UTC m=+1316.583140609" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.677828 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-skhpx" podStartSLOduration=3.445155416 podStartE2EDuration="58.677801832s" podCreationTimestamp="2026-01-28 13:07:11 +0000 UTC" firstStartedPulling="2026-01-28 13:07:13.740805072 +0000 UTC m=+1260.653022120" lastFinishedPulling="2026-01-28 13:08:08.973451498 +0000 UTC m=+1315.885668536" observedRunningTime="2026-01-28 13:08:09.671391325 +0000 UTC m=+1316.583608383" watchObservedRunningTime="2026-01-28 13:08:09.677801832 +0000 UTC m=+1316.590018870" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.740661 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.814131 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-config\") pod \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.814188 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-svc\") pod \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.814231 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcf6f\" (UniqueName: \"kubernetes.io/projected/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-kube-api-access-kcf6f\") pod \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.814315 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-nb\") pod \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.814425 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-swift-storage-0\") pod \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") " 
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.814460 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-sb\") pod \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\" (UID: \"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0\") "
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.839535 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-kube-api-access-kcf6f" (OuterVolumeSpecName: "kube-api-access-kcf6f") pod "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" (UID: "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0"). InnerVolumeSpecName "kube-api-access-kcf6f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.922865 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcf6f\" (UniqueName: \"kubernetes.io/projected/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-kube-api-access-kcf6f\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.978024 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" (UID: "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.978543 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" (UID: "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.988588 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" (UID: "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:08:09 crc kubenswrapper[4848]: I0128 13:08:09.994703 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" (UID: "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.020941 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-config" (OuterVolumeSpecName: "config") pod "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" (UID: "667fe1fa-7d88-44a9-ae6f-a6691d04c5a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.024877 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.024922 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.024937 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.024952 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.024963 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.569368 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-x6pft"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.585151 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x5jfk"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.698920 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b6dfd897-mp852" event={"ID":"667fe1fa-7d88-44a9-ae6f-a6691d04c5a0","Type":"ContainerDied","Data":"e2f170259864e0b7648093617a430df8a0fa2d243f660c11ee923062338dd913"}
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.698990 4848 scope.go:117] "RemoveContainer" containerID="1092527bac3025c100fadaa6eeba10cdf333a204f1d232dfc529425a221b958d"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.699197 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b6dfd897-mp852"
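The block above traces the kubelet volume manager's three-step teardown for the deleted dnsmasq pod's volumes: "operationExecutor.UnmountVolume started" (reconciler_common.go:159) kicks off the async operation, "UnmountVolume.TearDown succeeded" (operation_generator.go:803) reports the per-plugin unmount, and "Volume detached ... DevicePath \"\"" (reconciler_common.go:293) records the final actual-state update. A conceptual sketch of that desired-state/actual-state reconciliation follows; all type and function names here are invented for illustration and are not the kubelet's real internals:

package main

// volumeState models the stages visible in the log lines above.
type volumeState int

const (
	mounted    volumeState = iota // pod still references the volume
	unmounting                    // "operationExecutor.UnmountVolume started"
	tornDown                      // "UnmountVolume.TearDown succeeded"
	detached                      // "Volume detached ... DevicePath \"\""
)

// reconcile advances each volume that is no longer in the desired state
// (its pod was deleted) one step toward detachment per pass.
func reconcile(desired map[string]bool, actual map[string]volumeState) {
	for vol, state := range actual {
		if desired[vol] {
			continue // pod still exists; leave the volume mounted
		}
		switch state {
		case mounted:
			actual[vol] = unmounting // kick off the unmount operation
		case unmounting:
			actual[vol] = tornDown // the plugin's TearDown completed
		case tornDown:
			actual[vol] = detached // safe to report "Volume detached"
		}
	}
}

func main() {
	actual := map[string]volumeState{"config": mounted, "dns-svc": mounted}
	for i := 0; i < 3; i++ {
		reconcile(map[string]bool{}, actual) // desired state: pod gone
	}
}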
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.741892 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nr2g8\" (UniqueName: \"kubernetes.io/projected/88dde3f6-891e-49d7-a24c-575d166ec790-kube-api-access-nr2g8\") pod \"88dde3f6-891e-49d7-a24c-575d166ec790\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.741979 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-combined-ca-bundle\") pod \"88dde3f6-891e-49d7-a24c-575d166ec790\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742023 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-scripts\") pod \"a5436ced-61f3-4be7-ac99-690c2b58939d\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742118 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-config-data\") pod \"88dde3f6-891e-49d7-a24c-575d166ec790\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742205 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-fernet-keys\") pod \"a5436ced-61f3-4be7-ac99-690c2b58939d\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742237 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-config-data\") pod \"a5436ced-61f3-4be7-ac99-690c2b58939d\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742395 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-credential-keys\") pod \"a5436ced-61f3-4be7-ac99-690c2b58939d\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742467 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-combined-ca-bundle\") pod \"a5436ced-61f3-4be7-ac99-690c2b58939d\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742496 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-scripts\") pod \"88dde3f6-891e-49d7-a24c-575d166ec790\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742533 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgbtq\" (UniqueName: \"kubernetes.io/projected/a5436ced-61f3-4be7-ac99-690c2b58939d-kube-api-access-zgbtq\") pod \"a5436ced-61f3-4be7-ac99-690c2b58939d\" (UID: \"a5436ced-61f3-4be7-ac99-690c2b58939d\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.742612 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88dde3f6-891e-49d7-a24c-575d166ec790-logs\") pod \"88dde3f6-891e-49d7-a24c-575d166ec790\" (UID: \"88dde3f6-891e-49d7-a24c-575d166ec790\") "
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.746590 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerStarted","Data":"001771066b162018218fe226fbe7ad7fc7b182a456cc24588b12072a66c88b2a"}
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.766453 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88dde3f6-891e-49d7-a24c-575d166ec790-logs" (OuterVolumeSpecName: "logs") pod "88dde3f6-891e-49d7-a24c-575d166ec790" (UID: "88dde3f6-891e-49d7-a24c-575d166ec790"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.778456 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a5436ced-61f3-4be7-ac99-690c2b58939d" (UID: "a5436ced-61f3-4be7-ac99-690c2b58939d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.780568 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x5jfk" event={"ID":"a5436ced-61f3-4be7-ac99-690c2b58939d","Type":"ContainerDied","Data":"04ad76e89c822e3eb5ee99f80cfe3daba7173e3947c1988c017b0d5a28cc2098"}
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.780627 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04ad76e89c822e3eb5ee99f80cfe3daba7173e3947c1988c017b0d5a28cc2098"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.780769 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x5jfk"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.789869 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88dde3f6-891e-49d7-a24c-575d166ec790-kube-api-access-nr2g8" (OuterVolumeSpecName: "kube-api-access-nr2g8") pod "88dde3f6-891e-49d7-a24c-575d166ec790" (UID: "88dde3f6-891e-49d7-a24c-575d166ec790"). InnerVolumeSpecName "kube-api-access-nr2g8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.802390 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-x6pft" event={"ID":"88dde3f6-891e-49d7-a24c-575d166ec790","Type":"ContainerDied","Data":"cd3ed2c15b556659af4fd9733f07f45d5053f098be2c426424c0cf00471ea261"}
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.802481 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd3ed2c15b556659af4fd9733f07f45d5053f098be2c426424c0cf00471ea261"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.807402 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a5436ced-61f3-4be7-ac99-690c2b58939d" (UID: "a5436ced-61f3-4be7-ac99-690c2b58939d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.809031 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-x6pft"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.809054 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-scripts" (OuterVolumeSpecName: "scripts") pod "88dde3f6-891e-49d7-a24c-575d166ec790" (UID: "88dde3f6-891e-49d7-a24c-575d166ec790"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.816600 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-scripts" (OuterVolumeSpecName: "scripts") pod "a5436ced-61f3-4be7-ac99-690c2b58939d" (UID: "a5436ced-61f3-4be7-ac99-690c2b58939d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.829116 4848 scope.go:117] "RemoveContainer" containerID="03bb61c16e9e91b9fbffc4057dbf199348c1708c63a18e541cfc1a85f8cc6479"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.829127 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-config-data" (OuterVolumeSpecName: "config-data") pod "a5436ced-61f3-4be7-ac99-690c2b58939d" (UID: "a5436ced-61f3-4be7-ac99-690c2b58939d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.836946 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-config-data" (OuterVolumeSpecName: "config-data") pod "88dde3f6-891e-49d7-a24c-575d166ec790" (UID: "88dde3f6-891e-49d7-a24c-575d166ec790"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.840659 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5436ced-61f3-4be7-ac99-690c2b58939d-kube-api-access-zgbtq" (OuterVolumeSpecName: "kube-api-access-zgbtq") pod "a5436ced-61f3-4be7-ac99-690c2b58939d" (UID: "a5436ced-61f3-4be7-ac99-690c2b58939d"). InnerVolumeSpecName "kube-api-access-zgbtq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853716 4848 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853780 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853796 4848 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853822 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853834 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgbtq\" (UniqueName: \"kubernetes.io/projected/a5436ced-61f3-4be7-ac99-690c2b58939d-kube-api-access-zgbtq\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853844 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88dde3f6-891e-49d7-a24c-575d166ec790-logs\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853862 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nr2g8\" (UniqueName: \"kubernetes.io/projected/88dde3f6-891e-49d7-a24c-575d166ec790-kube-api-access-nr2g8\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853873 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.853891 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.873781 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88dde3f6-891e-49d7-a24c-575d166ec790" (UID: "88dde3f6-891e-49d7-a24c-575d166ec790"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.874203 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=3.480014982 podStartE2EDuration="52.874184103s" podCreationTimestamp="2026-01-28 13:07:18 +0000 UTC" firstStartedPulling="2026-01-28 13:07:20.613345837 +0000 UTC m=+1267.525562865" lastFinishedPulling="2026-01-28 13:08:10.007514958 +0000 UTC m=+1316.919731986" observedRunningTime="2026-01-28 13:08:10.817855358 +0000 UTC m=+1317.730072406" watchObservedRunningTime="2026-01-28 13:08:10.874184103 +0000 UTC m=+1317.786401141"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.902552 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5436ced-61f3-4be7-ac99-690c2b58939d" (UID: "a5436ced-61f3-4be7-ac99-690c2b58939d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.933448 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77b6dfd897-mp852"]
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.941965 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77b6dfd897-mp852"]
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.955774 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88dde3f6-891e-49d7-a24c-575d166ec790-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.955810 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5436ced-61f3-4be7-ac99-690c2b58939d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.970341 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-648cdddfd-q5sbd"]
Jan 28 13:08:10 crc kubenswrapper[4848]: E0128 13:08:10.970887 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5436ced-61f3-4be7-ac99-690c2b58939d" containerName="keystone-bootstrap"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.970908 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5436ced-61f3-4be7-ac99-690c2b58939d" containerName="keystone-bootstrap"
Jan 28 13:08:10 crc kubenswrapper[4848]: E0128 13:08:10.970930 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerName="init"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.970937 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerName="init"
Jan 28 13:08:10 crc kubenswrapper[4848]: E0128 13:08:10.970948 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88dde3f6-891e-49d7-a24c-575d166ec790" containerName="placement-db-sync"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.970954 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="88dde3f6-891e-49d7-a24c-575d166ec790" containerName="placement-db-sync"
Jan 28 13:08:10 crc kubenswrapper[4848]: E0128 13:08:10.970976 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerName="dnsmasq-dns"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.970985 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerName="dnsmasq-dns"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.971168 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="88dde3f6-891e-49d7-a24c-575d166ec790" containerName="placement-db-sync"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.971182 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" containerName="dnsmasq-dns"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.971196 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5436ced-61f3-4be7-ac99-690c2b58939d" containerName="keystone-bootstrap"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.972668 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.989532 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Jan 28 13:08:10 crc kubenswrapper[4848]: I0128 13:08:10.989535 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-648cdddfd-q5sbd"]
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.001793 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.002294 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-d454d7fbb-hth9j"]
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.003939 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.019374 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d454d7fbb-hth9j"]
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.020783 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.021035 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.059877 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-public-tls-certs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.060680 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-scripts\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.060823 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-internal-tls-certs\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.060938 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-config-data\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.061088 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-scripts\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.061179 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqls4\" (UniqueName: \"kubernetes.io/projected/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-kube-api-access-nqls4\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.061281 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-credential-keys\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.061402 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-combined-ca-bundle\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.061530 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-public-tls-certs\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.061878 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-combined-ca-bundle\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.062034 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-config-data\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.062210 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-logs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.062430 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-fernet-keys\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.062624 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-internal-tls-certs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.062711 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w562n\" (UniqueName: \"kubernetes.io/projected/e364a091-9a40-455c-b2dc-fd9a5d51181a-kube-api-access-w562n\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.164734 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-fernet-keys\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165080 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-internal-tls-certs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165156 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w562n\" (UniqueName: \"kubernetes.io/projected/e364a091-9a40-455c-b2dc-fd9a5d51181a-kube-api-access-w562n\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165267 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-public-tls-certs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165341 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-scripts\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165416 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-internal-tls-certs\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165498 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-config-data\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165584 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-scripts\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165661 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqls4\" (UniqueName: \"kubernetes.io/projected/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-kube-api-access-nqls4\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165734 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-credential-keys\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165811 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-combined-ca-bundle\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165890 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-public-tls-certs\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.165962 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-combined-ca-bundle\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.166083 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-config-data\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.166156 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-logs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.167554 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-logs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.176550 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-scripts\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.185154 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-public-tls-certs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.194849 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-combined-ca-bundle\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.195842 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-config-data\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.196427 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-fernet-keys\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.198016 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-public-tls-certs\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.199136 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-combined-ca-bundle\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.199265 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-credential-keys\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.200277 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-internal-tls-certs\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.204745 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-internal-tls-certs\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.205909 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e364a091-9a40-455c-b2dc-fd9a5d51181a-scripts\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.217080 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-config-data\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.220291 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w562n\" (UniqueName: \"kubernetes.io/projected/e364a091-9a40-455c-b2dc-fd9a5d51181a-kube-api-access-w562n\") pod \"keystone-d454d7fbb-hth9j\" (UID: \"e364a091-9a40-455c-b2dc-fd9a5d51181a\") " pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.224907 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqls4\" (UniqueName: \"kubernetes.io/projected/8e4ac2f3-a03f-4338-8cd3-188dc4829ea9-kube-api-access-nqls4\") pod \"placement-648cdddfd-q5sbd\" (UID: \"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9\") " pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.396593 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.438997 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.543146 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused"
Jan 28 13:08:11 crc kubenswrapper[4848]: I0128 13:08:11.582061 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68f5655b9d-76qsp" podUID="dfa56dc1-1635-454c-95e0-74fdedcf8b00" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused"
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.099856 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d454d7fbb-hth9j"]
Jan 28 13:08:12 crc kubenswrapper[4848]: W0128 13:08:12.131437 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode364a091_9a40_455c_b2dc_fd9a5d51181a.slice/crio-f5dc69aae34dd02ccc878b27df66c11d332da01a8e66a5079c34c1af069273f2 WatchSource:0}: Error finding container f5dc69aae34dd02ccc878b27df66c11d332da01a8e66a5079c34c1af069273f2: Status 404 returned error can't find the container with id f5dc69aae34dd02ccc878b27df66c11d332da01a8e66a5079c34c1af069273f2
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.261183 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-648cdddfd-q5sbd"]
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.891144 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="667fe1fa-7d88-44a9-ae6f-a6691d04c5a0" path="/var/lib/kubelet/pods/667fe1fa-7d88-44a9-ae6f-a6691d04c5a0/volumes"
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.892402 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-d454d7fbb-hth9j"
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.892495 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d454d7fbb-hth9j" event={"ID":"e364a091-9a40-455c-b2dc-fd9a5d51181a","Type":"ContainerStarted","Data":"722083ebee69ef57ea0a5e4718471c77147cc2a9a0162a6259722bc8d7b875a2"}
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.892562 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d454d7fbb-hth9j" event={"ID":"e364a091-9a40-455c-b2dc-fd9a5d51181a","Type":"ContainerStarted","Data":"f5dc69aae34dd02ccc878b27df66c11d332da01a8e66a5079c34c1af069273f2"}
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.926149 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-d454d7fbb-hth9j" podStartSLOduration=2.926117073 podStartE2EDuration="2.926117073s" podCreationTimestamp="2026-01-28 13:08:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:12.916123817 +0000 UTC m=+1319.828340855" watchObservedRunningTime="2026-01-28 13:08:12.926117073 +0000 UTC m=+1319.838334111"
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.926424 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-648cdddfd-q5sbd" event={"ID":"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9","Type":"ContainerStarted","Data":"a8012f5251863c0acbeab94bd220bdade1cf49b467fe8e25e9534e6064ea9a33"}
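The "Probe failed" lines above come from the kubelet prober hitting the horizon pods' startup endpoint before the container is listening, so the TCP connect is refused; the earlier watcher-api readiness failures are the same mechanism racing a container that is shutting down. A sketch of a startup probe that would produce the horizon lines follows; only the HTTPS scheme, port 8443, and the /dashboard/... path are taken from the log, while the period and threshold values are illustrative assumptions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// horizonStartupProbe sketches a probe that would produce the failures above.
// Scheme, port, and path come from the log; the numbers are assumptions.
var horizonStartupProbe = &corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		HTTPGet: &corev1.HTTPGetAction{
			Scheme: corev1.URISchemeHTTPS,
			Port:   intstr.FromInt(8443),
			Path:   "/dashboard/auth/login/?next=/dashboard/",
		},
	},
	PeriodSeconds:    10, // assumed
	FailureThreshold: 30, // assumed: up to ~5 minutes to come up
}

func main() {
	fmt.Printf("%+v\n", horizonStartupProbe)
}

With settings like these the container gets FailureThreshold x PeriodSeconds to start before the kubelet restarts it, which is why repeated startup-probe failures are normal while an application is still initializing.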
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.926476 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-648cdddfd-q5sbd" event={"ID":"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9","Type":"ContainerStarted","Data":"54daa99a232e1db70ae261e74b11b846d02b5001c6b1572e198c4a78b09df6d7"}
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.935155 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pdmlr" event={"ID":"ceb3076d-8232-44f5-8184-d727ef5c2943","Type":"ContainerStarted","Data":"97e52eab9d0acfa4667a46149584107f8e4a28dee028ff36cba94db1a32b055d"}
Jan 28 13:08:12 crc kubenswrapper[4848]: I0128 13:08:12.958216 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-pdmlr" podStartSLOduration=4.44903822 podStartE2EDuration="1m1.958182589s" podCreationTimestamp="2026-01-28 13:07:11 +0000 UTC" firstStartedPulling="2026-01-28 13:07:13.740812312 +0000 UTC m=+1260.653029350" lastFinishedPulling="2026-01-28 13:08:11.249956681 +0000 UTC m=+1318.162173719" observedRunningTime="2026-01-28 13:08:12.95568348 +0000 UTC m=+1319.867900518" watchObservedRunningTime="2026-01-28 13:08:12.958182589 +0000 UTC m=+1319.870399627"
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.760305 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.761020 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api-log" containerID="cri-o://2d1c5f21784660bf9afb2e4dd942320344fe1759c608ff998a970a68989b068f" gracePeriod=30
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.761233 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api" containerID="cri-o://95c25c6e224a4ad522599d923fdaf50763a91aaa01f1ab2e53c8cb288efb88b7" gracePeriod=30
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.957104 4848 generic.go:334] "Generic (PLEG): container finished" podID="a4d84124-baee-447b-99c9-713b3d13b205" containerID="2d1c5f21784660bf9afb2e4dd942320344fe1759c608ff998a970a68989b068f" exitCode=143
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.957189 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"a4d84124-baee-447b-99c9-713b3d13b205","Type":"ContainerDied","Data":"2d1c5f21784660bf9afb2e4dd942320344fe1759c608ff998a970a68989b068f"}
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.963181 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-648cdddfd-q5sbd" event={"ID":"8e4ac2f3-a03f-4338-8cd3-188dc4829ea9","Type":"ContainerStarted","Data":"6c77bbbd6cd86362cb619e04051c4afd0c1f9b7c2eb0eff1ae217f30549af1c8"}
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.963273 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:13 crc kubenswrapper[4848]: I0128 13:08:13.963295 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-648cdddfd-q5sbd"
Jan 28 13:08:14 crc kubenswrapper[4848]: I0128 13:08:14.000812 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-648cdddfd-q5sbd" podStartSLOduration=4.000789303 podStartE2EDuration="4.000789303s" podCreationTimestamp="2026-01-28 13:08:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:13.99450161 +0000 UTC m=+1320.906718648" watchObservedRunningTime="2026-01-28 13:08:14.000789303 +0000 UTC m=+1320.913006341"
Jan 28 13:08:14 crc kubenswrapper[4848]: I0128 13:08:14.427636 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 28 13:08:15 crc kubenswrapper[4848]: I0128 13:08:15.397495 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.159:9322/\": read tcp 10.217.0.2:58960->10.217.0.159:9322: read: connection reset by peer"
Jan 28 13:08:15 crc kubenswrapper[4848]: I0128 13:08:15.397514 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9322/\": read tcp 10.217.0.2:58956->10.217.0.159:9322: read: connection reset by peer"
Jan 28 13:08:15 crc kubenswrapper[4848]: I0128 13:08:15.993145 4848 generic.go:334] "Generic (PLEG): container finished" podID="a4d84124-baee-447b-99c9-713b3d13b205" containerID="95c25c6e224a4ad522599d923fdaf50763a91aaa01f1ab2e53c8cb288efb88b7" exitCode=0
Jan 28 13:08:15 crc kubenswrapper[4848]: I0128 13:08:15.993218 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"a4d84124-baee-447b-99c9-713b3d13b205","Type":"ContainerDied","Data":"95c25c6e224a4ad522599d923fdaf50763a91aaa01f1ab2e53c8cb288efb88b7"}
Jan 28 13:08:15 crc kubenswrapper[4848]: I0128 13:08:15.995944 4848 generic.go:334] "Generic (PLEG): container finished" podID="a7100632-3157-40c8-9f9f-a47fcd756ca5" containerID="111d97c73c4dca61f6d1fe05e1666b02b7e45f4e5913df0033c11304d7d91529" exitCode=0
Jan 28 13:08:15 crc kubenswrapper[4848]: I0128 13:08:15.995993 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-skhpx" event={"ID":"a7100632-3157-40c8-9f9f-a47fcd756ca5","Type":"ContainerDied","Data":"111d97c73c4dca61f6d1fe05e1666b02b7e45f4e5913df0033c11304d7d91529"}
Jan 28 13:08:17 crc kubenswrapper[4848]: I0128 13:08:17.012877 4848 generic.go:334] "Generic (PLEG): container finished" podID="88151fad-4442-4d32-a675-f89f070ed086" containerID="001771066b162018218fe226fbe7ad7fc7b182a456cc24588b12072a66c88b2a" exitCode=1
Jan 28 13:08:17 crc kubenswrapper[4848]: I0128 13:08:17.012969 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerDied","Data":"001771066b162018218fe226fbe7ad7fc7b182a456cc24588b12072a66c88b2a"}
Jan 28 13:08:17 crc kubenswrapper[4848]: I0128 13:08:17.014541 4848 scope.go:117] "RemoveContainer" containerID="001771066b162018218fe226fbe7ad7fc7b182a456cc24588b12072a66c88b2a"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.052408 4848 generic.go:334] "Generic (PLEG): container finished" podID="c9967e9e-d256-4645-be9b-3f3789db9f05" containerID="7900f694b535726a65008368f978830ffccacd4c59b079d1ca8de394b21b1a75" exitCode=0
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.052480 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nhdf9" event={"ID":"c9967e9e-d256-4645-be9b-3f3789db9f05","Type":"ContainerDied","Data":"7900f694b535726a65008368f978830ffccacd4c59b079d1ca8de394b21b1a75"}
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.246496 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.159:9322/\": dial tcp 10.217.0.159:9322: connect: connection refused"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.246493 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9322/\": dial tcp 10.217.0.159:9322: connect: connection refused"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.315916 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.316023 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.316037 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.316046 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.427980 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.472222 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.627350 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-skhpx"
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.688037 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-combined-ca-bundle\") pod \"a7100632-3157-40c8-9f9f-a47fcd756ca5\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") "
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.688111 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqcss\" (UniqueName: \"kubernetes.io/projected/a7100632-3157-40c8-9f9f-a47fcd756ca5-kube-api-access-mqcss\") pod \"a7100632-3157-40c8-9f9f-a47fcd756ca5\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") "
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.688208 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-db-sync-config-data\") pod \"a7100632-3157-40c8-9f9f-a47fcd756ca5\" (UID: \"a7100632-3157-40c8-9f9f-a47fcd756ca5\") "
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.697428 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7100632-3157-40c8-9f9f-a47fcd756ca5-kube-api-access-mqcss" (OuterVolumeSpecName: "kube-api-access-mqcss") pod "a7100632-3157-40c8-9f9f-a47fcd756ca5" (UID: "a7100632-3157-40c8-9f9f-a47fcd756ca5"). InnerVolumeSpecName "kube-api-access-mqcss". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.701737 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a7100632-3157-40c8-9f9f-a47fcd756ca5" (UID: "a7100632-3157-40c8-9f9f-a47fcd756ca5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.747779 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7100632-3157-40c8-9f9f-a47fcd756ca5" (UID: "a7100632-3157-40c8-9f9f-a47fcd756ca5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.790551 4848 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.790651 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7100632-3157-40c8-9f9f-a47fcd756ca5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:19 crc kubenswrapper[4848]: I0128 13:08:19.790668 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqcss\" (UniqueName: \"kubernetes.io/projected/a7100632-3157-40c8-9f9f-a47fcd756ca5-kube-api-access-mqcss\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.064764 4848 generic.go:334] "Generic (PLEG): container finished" podID="ceb3076d-8232-44f5-8184-d727ef5c2943" containerID="97e52eab9d0acfa4667a46149584107f8e4a28dee028ff36cba94db1a32b055d" exitCode=0
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.064839 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pdmlr" event={"ID":"ceb3076d-8232-44f5-8184-d727ef5c2943","Type":"ContainerDied","Data":"97e52eab9d0acfa4667a46149584107f8e4a28dee028ff36cba94db1a32b055d"}
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.066681 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-skhpx" event={"ID":"a7100632-3157-40c8-9f9f-a47fcd756ca5","Type":"ContainerDied","Data":"666146865fb1d2b7fc44d283b229e4437822129277ecf795e0b4bc145d96fb31"}
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.066702 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="666146865fb1d2b7fc44d283b229e4437822129277ecf795e0b4bc145d96fb31"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.066762 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-skhpx"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.113343 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.372696 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
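The paired "Generic (PLEG): container finished" and "SyncLoop (PLEG): event for pod" lines above are emitted by the kubelet's pod lifecycle event generator, which relists container state from the runtime and turns each change into an event keyed by pod UID; the event={"ID":...,"Type":...,"Data":...} fields in the log are the serialized form of that event. A simplified sketch of the event shape follows, modeled only on what the log prints rather than on the kubelet's actual source:

package main

import "fmt"

// PodLifecycleEventType mirrors the "Type" values seen in the log.
type PodLifecycleEventType string

const (
	ContainerStarted PodLifecycleEventType = "ContainerStarted"
	ContainerDied    PodLifecycleEventType = "ContainerDied"
)

// PodLifecycleEvent is a simplified sketch of the event shape serialized in
// the log's event={...} fields; it is not copied from kubelet source.
type PodLifecycleEvent struct {
	ID   string                // pod UID
	Type PodLifecycleEventType // what changed
	Data string                // container ID involved, for container events
}

func main() {
	// The barbican-db-sync ContainerDied event from the log, re-expressed:
	ev := PodLifecycleEvent{
		ID:   "a7100632-3157-40c8-9f9f-a47fcd756ca5",
		Type: ContainerDied,
		Data: "111d97c73c4dca61f6d1fe05e1666b02b7e45f4e5913df0033c11304d7d91529",
	}
	fmt.Printf("event=%+v\n", ev)
}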
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.372696 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.504961 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7nwr\" (UniqueName: \"kubernetes.io/projected/a4d84124-baee-447b-99c9-713b3d13b205-kube-api-access-m7nwr\") pod \"a4d84124-baee-447b-99c9-713b3d13b205\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") "
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.505478 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4d84124-baee-447b-99c9-713b3d13b205-logs\") pod \"a4d84124-baee-447b-99c9-713b3d13b205\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") "
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.505646 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-combined-ca-bundle\") pod \"a4d84124-baee-447b-99c9-713b3d13b205\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") "
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.505731 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-config-data\") pod \"a4d84124-baee-447b-99c9-713b3d13b205\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") "
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.505787 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-custom-prometheus-ca\") pod \"a4d84124-baee-447b-99c9-713b3d13b205\" (UID: \"a4d84124-baee-447b-99c9-713b3d13b205\") "
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.512078 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4d84124-baee-447b-99c9-713b3d13b205-logs" (OuterVolumeSpecName: "logs") pod "a4d84124-baee-447b-99c9-713b3d13b205" (UID: "a4d84124-baee-447b-99c9-713b3d13b205"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.513420 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4d84124-baee-447b-99c9-713b3d13b205-kube-api-access-m7nwr" (OuterVolumeSpecName: "kube-api-access-m7nwr") pod "a4d84124-baee-447b-99c9-713b3d13b205" (UID: "a4d84124-baee-447b-99c9-713b3d13b205"). InnerVolumeSpecName "kube-api-access-m7nwr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.545802 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4d84124-baee-447b-99c9-713b3d13b205" (UID: "a4d84124-baee-447b-99c9-713b3d13b205"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.552661 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "a4d84124-baee-447b-99c9-713b3d13b205" (UID: "a4d84124-baee-447b-99c9-713b3d13b205"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.609047 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-config-data" (OuterVolumeSpecName: "config-data") pod "a4d84124-baee-447b-99c9-713b3d13b205" (UID: "a4d84124-baee-447b-99c9-713b3d13b205"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.609235 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.609289 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.609332 4848 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a4d84124-baee-447b-99c9-713b3d13b205-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.609348 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7nwr\" (UniqueName: \"kubernetes.io/projected/a4d84124-baee-447b-99c9-713b3d13b205-kube-api-access-m7nwr\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.609364 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4d84124-baee-447b-99c9-713b3d13b205-logs\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.878065 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-bd96c8879-gdtwm"]
Jan 28 13:08:20 crc kubenswrapper[4848]: E0128 13:08:20.878719 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.878731 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api"
Jan 28 13:08:20 crc kubenswrapper[4848]: E0128 13:08:20.878759 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api-log"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.878765 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api-log"
Jan 28 13:08:20 crc kubenswrapper[4848]: E0128 13:08:20.878783 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7100632-3157-40c8-9f9f-a47fcd756ca5" containerName="barbican-db-sync"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.878789 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7100632-3157-40c8-9f9f-a47fcd756ca5" containerName="barbican-db-sync"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.879006 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api-log"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.879037 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4d84124-baee-447b-99c9-713b3d13b205" containerName="watcher-api"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.879048 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7100632-3157-40c8-9f9f-a47fcd756ca5" containerName="barbican-db-sync"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.880122 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.886032 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2n7gr"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.888738 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.896803 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Jan 28 13:08:20 crc kubenswrapper[4848]: I0128 13:08:20.917789 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bd96c8879-gdtwm"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.035928 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-combined-ca-bundle\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.036019 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqtvw\" (UniqueName: \"kubernetes.io/projected/a56e7c4e-4ce2-4742-8645-6201f8c957f7-kube-api-access-qqtvw\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.036078 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a56e7c4e-4ce2-4742-8645-6201f8c957f7-logs\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.036129 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-config-data-custom\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.036184 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-config-data\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.072444 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.088318 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.096767 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.142838 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-combined-ca-bundle\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.142933 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqtvw\" (UniqueName: \"kubernetes.io/projected/a56e7c4e-4ce2-4742-8645-6201f8c957f7-kube-api-access-qqtvw\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.142990 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a56e7c4e-4ce2-4742-8645-6201f8c957f7-logs\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.143036 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-config-data-custom\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.143091 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-config-data\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.143756 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a56e7c4e-4ce2-4742-8645-6201f8c957f7-logs\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.149666 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-combined-ca-bundle\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.153308 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.159088 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-config-data-custom\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.163580 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a56e7c4e-4ce2-4742-8645-6201f8c957f7-config-data\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.197724 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nhdf9"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.213659 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerStarted","Data":"3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c"}
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.220080 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqtvw\" (UniqueName: \"kubernetes.io/projected/a56e7c4e-4ce2-4742-8645-6201f8c957f7-kube-api-access-qqtvw\") pod \"barbican-worker-bd96c8879-gdtwm\" (UID: \"a56e7c4e-4ce2-4742-8645-6201f8c957f7\") " pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.249331 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586f4f6dc5-xc8tj"]
Jan 28 13:08:21 crc kubenswrapper[4848]: E0128 13:08:21.249890 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9967e9e-d256-4645-be9b-3f3789db9f05" containerName="glance-db-sync"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.249904 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9967e9e-d256-4645-be9b-3f3789db9f05" containerName="glance-db-sync"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.250125 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9967e9e-d256-4645-be9b-3f3789db9f05" containerName="glance-db-sync"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.251299 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.251496 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"a4d84124-baee-447b-99c9-713b3d13b205","Type":"ContainerDied","Data":"c632903c39db540a0a12c6832254545e0ba73fe635c58b71486415f87d46560a"}
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.251576 4848 scope.go:117] "RemoveContainer" containerID="95c25c6e224a4ad522599d923fdaf50763a91aaa01f1ab2e53c8cb288efb88b7"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.251797 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.275867 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nhdf9"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.275990 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nhdf9" event={"ID":"c9967e9e-d256-4645-be9b-3f3789db9f05","Type":"ContainerDied","Data":"4ff566c73a7bba7e8410f1df88750fa087ed235b6c93783b086d5a4ac44a550c"}
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.276039 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ff566c73a7bba7e8410f1df88750fa087ed235b6c93783b086d5a4ac44a550c"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.296208 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-config-data-custom\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.296296 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-config-data\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.296483 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a72021f-6e14-4681-b127-7c85be7c597c-logs\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.296601 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg887\" (UniqueName: \"kubernetes.io/projected/7a72021f-6e14-4681-b127-7c85be7c597c-kube-api-access-sg887\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.296692 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-combined-ca-bundle\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.335154 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586f4f6dc5-xc8tj"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.349680 4848 scope.go:117] "RemoveContainer" containerID="2d1c5f21784660bf9afb2e4dd942320344fe1759c608ff998a970a68989b068f"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.402448 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-config-data-custom\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.402882 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-config-data\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.402969 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a72021f-6e14-4681-b127-7c85be7c597c-logs\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.403019 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg887\" (UniqueName: \"kubernetes.io/projected/7a72021f-6e14-4681-b127-7c85be7c597c-kube-api-access-sg887\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.403058 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-combined-ca-bundle\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.411829 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a72021f-6e14-4681-b127-7c85be7c597c-logs\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.440207 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-config-data-custom\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.440453 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-config-data\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.440644 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bd96c8879-gdtwm"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.457344 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg887\" (UniqueName: \"kubernetes.io/projected/7a72021f-6e14-4681-b127-7c85be7c597c-kube-api-access-sg887\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.463223 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a72021f-6e14-4681-b127-7c85be7c597c-combined-ca-bundle\") pod \"barbican-keystone-listener-5cb69d9f6b-f9ck5\" (UID: \"7a72021f-6e14-4681-b127-7c85be7c597c\") " pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.467420 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.507205 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgc8n\" (UniqueName: \"kubernetes.io/projected/c9967e9e-d256-4645-be9b-3f3789db9f05-kube-api-access-kgc8n\") pod \"c9967e9e-d256-4645-be9b-3f3789db9f05\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") "
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.507373 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-db-sync-config-data\") pod \"c9967e9e-d256-4645-be9b-3f3789db9f05\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") "
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.507462 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-config-data\") pod \"c9967e9e-d256-4645-be9b-3f3789db9f05\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") "
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.507636 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-combined-ca-bundle\") pod \"c9967e9e-d256-4645-be9b-3f3789db9f05\" (UID: \"c9967e9e-d256-4645-be9b-3f3789db9f05\") "
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.508092 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-config\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.508156 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvbvl\" (UniqueName: \"kubernetes.io/projected/ead300c1-4c31-4c66-91a5-ac7609850be6-kube-api-access-kvbvl\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.508410 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-svc\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.508474 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-sb\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.508505 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-nb\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.508572 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-swift-storage-0\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.515909 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c9967e9e-d256-4645-be9b-3f3789db9f05" (UID: "c9967e9e-d256-4645-be9b-3f3789db9f05"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.543620 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9967e9e-d256-4645-be9b-3f3789db9f05-kube-api-access-kgc8n" (OuterVolumeSpecName: "kube-api-access-kgc8n") pod "c9967e9e-d256-4645-be9b-3f3789db9f05" (UID: "c9967e9e-d256-4645-be9b-3f3789db9f05"). InnerVolumeSpecName "kube-api-access-kgc8n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.569469 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-67d5988776-4bwdg"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.572026 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.600242 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.613961 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvbvl\" (UniqueName: \"kubernetes.io/projected/ead300c1-4c31-4c66-91a5-ac7609850be6-kube-api-access-kvbvl\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614179 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-svc\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614235 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-sb\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614294 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-nb\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614329 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-swift-storage-0\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614379 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-config\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614453 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgc8n\" (UniqueName: \"kubernetes.io/projected/c9967e9e-d256-4645-be9b-3f3789db9f05-kube-api-access-kgc8n\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.614485 4848 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.615643 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-config\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
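The watcher-api-0 lines above trace a full pod replacement: SyncLoop DELETE and REMOVE retire the old instance (UID a4d84124-baee-447b-99c9-713b3d13b205), then SyncLoop ADD re-creates the pod under a new UID. The pod name stays stable, so the UID is what distinguishes the generations. A sketch (hypothetical helper) that recovers the UID history per pod name from the mount-side reconciler lines, which carry both fields:

    import re
    from collections import defaultdict

    # Mount-side reconciler messages name the pod and its UID together, e.g.
    #   pod \"watcher-api-0\" (UID: \"2c33d357-...\")
    POD_UID = re.compile(r'pod \\"(?P<name>[^\\]+)\\" \(UID: \\"(?P<uid>[0-9a-f-]{36})\\"\)')

    def uid_history(messages):
        """Map pod name -> UIDs seen, in first-seen order (a recreation shows >1 UID)."""
        seen = defaultdict(list)
        for msg in messages:
            m = POD_UID.search(msg)
            if m and m['uid'] not in seen[m['name']]:
                seen[m['name']].append(m['uid'])
        return seen

Run over this window, watcher-api-0 would show two UIDs (a4d84124-... followed by 2c33d357-...), while steady pods show one.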
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.615878 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.620017 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-nb\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.620813 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-svc\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.621517 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-sb\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.634025 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-swift-storage-0\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.639330 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.660139 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9967e9e-d256-4645-be9b-3f3789db9f05" (UID: "c9967e9e-d256-4645-be9b-3f3789db9f05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.660230 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-67d5988776-4bwdg"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.671558 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.675149 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.679868 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.680156 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.680665 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.692267 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvbvl\" (UniqueName: \"kubernetes.io/projected/ead300c1-4c31-4c66-91a5-ac7609850be6-kube-api-access-kvbvl\") pod \"dnsmasq-dns-586f4f6dc5-xc8tj\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.705974 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.722732 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tvps\" (UniqueName: \"kubernetes.io/projected/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-kube-api-access-6tvps\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.722995 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.723063 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data-custom\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.755718 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-logs\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.755781 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-combined-ca-bundle\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.756065 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.789299 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-config-data" (OuterVolumeSpecName: "config-data") pod "c9967e9e-d256-4645-be9b-3f3789db9f05" (UID: "c9967e9e-d256-4645-be9b-3f3789db9f05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:21 crc kubenswrapper[4848]: E0128 13:08:21.855618 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.858703 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c33d357-d7c0-4239-a58e-d882b915fafb-logs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.858769 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.858800 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.858853 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25psd\" (UniqueName: \"kubernetes.io/projected/2c33d357-d7c0-4239-a58e-d882b915fafb-kube-api-access-25psd\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.858904 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data-custom\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.858926 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-config-data\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859002 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-logs\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859080 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-public-tls-certs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859112 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-combined-ca-bundle\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859202 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tvps\" (UniqueName: \"kubernetes.io/projected/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-kube-api-access-6tvps\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859297 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859330 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.859410 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9967e9e-d256-4645-be9b-3f3789db9f05-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.868083 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-logs\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.869131 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data-custom\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.870408 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-combined-ca-bundle\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.888939 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.895166 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tvps\" (UniqueName: \"kubernetes.io/projected/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-kube-api-access-6tvps\") pod \"barbican-api-67d5988776-4bwdg\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.906863 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961220 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25psd\" (UniqueName: \"kubernetes.io/projected/2c33d357-d7c0-4239-a58e-d882b915fafb-kube-api-access-25psd\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961311 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-config-data\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961365 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-public-tls-certs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961441 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961468 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961551 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c33d357-d7c0-4239-a58e-d882b915fafb-logs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.961575 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.964647 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c33d357-d7c0-4239-a58e-d882b915fafb-logs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.973319 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-config-data\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.978470 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.979011 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.979042 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-public-tls-certs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.979164 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c33d357-d7c0-4239-a58e-d882b915fafb-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:21 crc kubenswrapper[4848]: I0128 13:08:21.982531 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25psd\" (UniqueName: \"kubernetes.io/projected/2c33d357-d7c0-4239-a58e-d882b915fafb-kube-api-access-25psd\") pod \"watcher-api-0\" (UID: \"2c33d357-d7c0-4239-a58e-d882b915fafb\") " pod="openstack/watcher-api-0"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.018240 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-67d5988776-4bwdg"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.046548 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.199401 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-pdmlr"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.294684 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bd96c8879-gdtwm"]
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.325718 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5"]
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.340772 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerStarted","Data":"4a626b3dc7a8d703278a77841e50e1667868bcc46d0844b23a9776bdee530558"}
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.340996 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="ceilometer-notification-agent" containerID="cri-o://9b5f576212402a219c02a2fc8cb5e5921b83df0b4d58e213d3d6eb28e9e919ac" gracePeriod=30
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.341532 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.341898 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="proxy-httpd" containerID="cri-o://4a626b3dc7a8d703278a77841e50e1667868bcc46d0844b23a9776bdee530558" gracePeriod=30
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.341955 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="sg-core" containerID="cri-o://dfb5932cc98a5f499a97317d3e120a3e94e9600f558651bce8cad150d220c057" gracePeriod=30
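The three "Killing container with a grace period" entries for openstack/ceilometer-0 show kubelet asking CRI-O to stop each container with gracePeriod=30, i.e. the runtime may escalate to a forced kill 30 seconds after the request. A trivial sketch (ours, not kubelet code) of that arithmetic against the klog timestamps, assuming the log's HH:MM:SS.micro time format:

    from datetime import datetime, timedelta

    def kill_deadline(time_str: str, grace_seconds: int = 30):
        """Given the klog time of a 'Killing container' entry and its gracePeriod,
        return when the runtime may escalate to a forced kill. (klog times carry
        no year, so we keep time-of-day only.)"""
        t = datetime.strptime(time_str, '%H:%M:%S.%f')
        return (t + timedelta(seconds=grace_seconds)).time()

    # e.g. kill_deadline('13:08:22.340996') -> 13:08:52.340996

Note also that the E-severity entries in this window (the cpu_manager RemoveStaleState lines and the ErrImagePull for ceilometer-central-agent above) can be isolated by filtering on the parsed severity field.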
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.355280 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-pdmlr"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.355375 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pdmlr" event={"ID":"ceb3076d-8232-44f5-8184-d727ef5c2943","Type":"ContainerDied","Data":"b0f2dde9d8b3002d523458aed978f09d742b7c8768e14a72a393b6cc71ee361f"}
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.355440 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0f2dde9d8b3002d523458aed978f09d742b7c8768e14a72a393b6cc71ee361f"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.387906 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-db-sync-config-data\") pod \"ceb3076d-8232-44f5-8184-d727ef5c2943\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") "
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.387985 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-scripts\") pod \"ceb3076d-8232-44f5-8184-d727ef5c2943\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") "
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.388174 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnb4r\" (UniqueName: \"kubernetes.io/projected/ceb3076d-8232-44f5-8184-d727ef5c2943-kube-api-access-tnb4r\") pod \"ceb3076d-8232-44f5-8184-d727ef5c2943\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") "
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.388239 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-combined-ca-bundle\") pod \"ceb3076d-8232-44f5-8184-d727ef5c2943\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") "
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.388292 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ceb3076d-8232-44f5-8184-d727ef5c2943-etc-machine-id\") pod \"ceb3076d-8232-44f5-8184-d727ef5c2943\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") "
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.388365 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-config-data\") pod \"ceb3076d-8232-44f5-8184-d727ef5c2943\" (UID: \"ceb3076d-8232-44f5-8184-d727ef5c2943\") "
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.389617 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ceb3076d-8232-44f5-8184-d727ef5c2943-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ceb3076d-8232-44f5-8184-d727ef5c2943" (UID: "ceb3076d-8232-44f5-8184-d727ef5c2943"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.396416 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-scripts" (OuterVolumeSpecName: "scripts") pod "ceb3076d-8232-44f5-8184-d727ef5c2943" (UID: "ceb3076d-8232-44f5-8184-d727ef5c2943"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.403514 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ceb3076d-8232-44f5-8184-d727ef5c2943" (UID: "ceb3076d-8232-44f5-8184-d727ef5c2943"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.403548 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceb3076d-8232-44f5-8184-d727ef5c2943-kube-api-access-tnb4r" (OuterVolumeSpecName: "kube-api-access-tnb4r") pod "ceb3076d-8232-44f5-8184-d727ef5c2943" (UID: "ceb3076d-8232-44f5-8184-d727ef5c2943"). InnerVolumeSpecName "kube-api-access-tnb4r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.476676 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ceb3076d-8232-44f5-8184-d727ef5c2943" (UID: "ceb3076d-8232-44f5-8184-d727ef5c2943"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.493045 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnb4r\" (UniqueName: \"kubernetes.io/projected/ceb3076d-8232-44f5-8184-d727ef5c2943-kube-api-access-tnb4r\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.493563 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.493575 4848 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ceb3076d-8232-44f5-8184-d727ef5c2943-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.493585 4848 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.493597 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.529235 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-config-data" (OuterVolumeSpecName: "config-data") pod "ceb3076d-8232-44f5-8184-d727ef5c2943" (UID: "ceb3076d-8232-44f5-8184-d727ef5c2943"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.598754 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb3076d-8232-44f5-8184-d727ef5c2943-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.697339 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586f4f6dc5-xc8tj"]
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.734379 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586f4f6dc5-xc8tj"]
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.763971 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-569d7975c-rjrk8"]
Jan 28 13:08:22 crc kubenswrapper[4848]: E0128 13:08:22.764627 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb3076d-8232-44f5-8184-d727ef5c2943" containerName="cinder-db-sync"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.764651 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb3076d-8232-44f5-8184-d727ef5c2943" containerName="cinder-db-sync"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.764918 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceb3076d-8232-44f5-8184-d727ef5c2943" containerName="cinder-db-sync"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.766524 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-569d7975c-rjrk8"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.792726 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-569d7975c-rjrk8"]
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.811783 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vjmq\" (UniqueName: \"kubernetes.io/projected/4d9e697a-2a27-44ad-b426-2cb40cddadea-kube-api-access-9vjmq\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.811842 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-config\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.811867 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-nb\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.811969 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-svc\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8"
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.812001 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName:
\"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-swift-storage-0\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.812034 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-sb\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.951915 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4d84124-baee-447b-99c9-713b3d13b205" path="/var/lib/kubelet/pods/a4d84124-baee-447b-99c9-713b3d13b205/volumes" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.968458 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-svc\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.972117 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-svc\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.972447 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-swift-storage-0\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.975831 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-swift-storage-0\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.976082 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-sb\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.976328 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vjmq\" (UniqueName: \"kubernetes.io/projected/4d9e697a-2a27-44ad-b426-2cb40cddadea-kube-api-access-9vjmq\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.976832 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-config\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" 
Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.990331 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-nb\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.977608 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-sb\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.978767 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-config\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:22 crc kubenswrapper[4848]: I0128 13:08:22.991286 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-nb\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.011320 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vjmq\" (UniqueName: \"kubernetes.io/projected/4d9e697a-2a27-44ad-b426-2cb40cddadea-kube-api-access-9vjmq\") pod \"dnsmasq-dns-569d7975c-rjrk8\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.075594 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.093898 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-67d5988776-4bwdg"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.307744 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.382491 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bd96c8879-gdtwm" event={"ID":"a56e7c4e-4ce2-4742-8645-6201f8c957f7","Type":"ContainerStarted","Data":"4e75fc85676a8a7ac3d4b2b7fa162e0ec58339a4fdf2a6f5d64c982e2c523166"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.400560 4848 generic.go:334] "Generic (PLEG): container finished" podID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerID="4a626b3dc7a8d703278a77841e50e1667868bcc46d0844b23a9776bdee530558" exitCode=0 Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.400604 4848 generic.go:334] "Generic (PLEG): container finished" podID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerID="dfb5932cc98a5f499a97317d3e120a3e94e9600f558651bce8cad150d220c057" exitCode=2 Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.400711 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerDied","Data":"4a626b3dc7a8d703278a77841e50e1667868bcc46d0844b23a9776bdee530558"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.400742 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerDied","Data":"dfb5932cc98a5f499a97317d3e120a3e94e9600f558651bce8cad150d220c057"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.410964 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2c33d357-d7c0-4239-a58e-d882b915fafb","Type":"ContainerStarted","Data":"6fc72b07e33ddc3fbc63f28557735a45e610c7b3dba956ebe53b3539827a3242"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.415967 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67d5988776-4bwdg" event={"ID":"5ac34887-0b39-4669-a00c-c40b4a5f5c1a","Type":"ContainerStarted","Data":"10178059e9ce1c184d63e0ae5277959b9cc872dfd978718ee0ad2b7e59233887"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.416003 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67d5988776-4bwdg" event={"ID":"5ac34887-0b39-4669-a00c-c40b4a5f5c1a","Type":"ContainerStarted","Data":"f2edef2fa3b925c6eed27417773a3e3a1830087f351cf69d7c53aaf5a89a05e2"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.419268 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5" event={"ID":"7a72021f-6e14-4681-b127-7c85be7c597c","Type":"ContainerStarted","Data":"775165ac3bc7eae7f5c7812e19bd9727b6239a312a52a5aae6f71c3f31980274"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.422938 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" event={"ID":"ead300c1-4c31-4c66-91a5-ac7609850be6","Type":"ContainerStarted","Data":"6d4e694dcefeade65c2785de94b550677800bcad2e4bdaaa95f033ed94ed8903"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.422976 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" event={"ID":"ead300c1-4c31-4c66-91a5-ac7609850be6","Type":"ContainerStarted","Data":"c3a6764ab87409552bf974a0be53227b314095b9e158ce5309c8f7890583ecdb"} Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.423788 4848 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" podUID="ead300c1-4c31-4c66-91a5-ac7609850be6" containerName="init" containerID="cri-o://6d4e694dcefeade65c2785de94b550677800bcad2e4bdaaa95f033ed94ed8903" gracePeriod=10 Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.624437 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.627052 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.629751 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c898f" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.631347 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.632057 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.639136 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.756632 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.776576 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.776718 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.783417 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-569d7975c-rjrk8"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.787868 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.788338 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-67mpv" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.791574 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.795654 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862522 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862637 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862672 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862751 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-655fz\" (UniqueName: \"kubernetes.io/projected/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-kube-api-access-655fz\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862840 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862891 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-scripts\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862920 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-config-data\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.862970 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-scripts\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.863021 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.863059 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-logs\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.863087 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.863138 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrwct\" (UniqueName: 
\"kubernetes.io/projected/b2c3ecea-44d3-406d-b40a-b1d4515e5764-kube-api-access-zrwct\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.863165 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.871721 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-568df974c9-vxghc"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.875402 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.905450 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.908188 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.914774 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.930740 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568df974c9-vxghc"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.951373 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965673 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965732 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965778 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965802 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965858 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-655fz\" (UniqueName: 
\"kubernetes.io/projected/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-kube-api-access-655fz\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965898 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965934 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-scripts\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965954 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-config-data\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.965983 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-scripts\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.966006 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.966035 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-logs\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.966053 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.966094 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrwct\" (UniqueName: \"kubernetes.io/projected/b2c3ecea-44d3-406d-b40a-b1d4515e5764-kube-api-access-zrwct\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.969791 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: 
\"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.971884 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-logs\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.973775 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.979725 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:23 crc kubenswrapper[4848]: I0128 13:08:23.996400 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.013589 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.016520 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-scripts\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.024099 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrwct\" (UniqueName: \"kubernetes.io/projected/b2c3ecea-44d3-406d-b40a-b1d4515e5764-kube-api-access-zrwct\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.030411 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.030853 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.033861 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-config-data\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.037904 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-scripts\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.059110 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-655fz\" (UniqueName: \"kubernetes.io/projected/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-kube-api-access-655fz\") pod \"cinder-scheduler-0\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068664 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068719 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-svc\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068756 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-nb\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068780 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b8f2\" (UniqueName: \"kubernetes.io/projected/9e16da65-eb57-4041-90f2-00243246dabc-kube-api-access-8b8f2\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068819 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068849 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068882 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-config\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068921 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5bcj\" (UniqueName: \"kubernetes.io/projected/6832a35d-3728-4d58-9960-96044664057b-kube-api-access-g5bcj\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068940 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-swift-storage-0\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068975 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-sb\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.068992 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.069013 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-logs\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.069041 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.111063 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.112925 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.112943 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.124143 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.139818 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.151033 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185175 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185309 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-config\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185452 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5bcj\" (UniqueName: \"kubernetes.io/projected/6832a35d-3728-4d58-9960-96044664057b-kube-api-access-g5bcj\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185473 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-swift-storage-0\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185695 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-sb\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185731 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185765 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-logs\") pod \"glance-default-internal-api-0\" (UID: 
\"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185831 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185946 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.185992 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-svc\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.201679 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-nb\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.206105 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.206853 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-config\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.207626 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-sb\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.207824 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.208392 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-swift-storage-0\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc 
kubenswrapper[4848]: I0128 13:08:24.209632 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-logs\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.211125 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-svc\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.211313 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-nb\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.217676 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b8f2\" (UniqueName: \"kubernetes.io/projected/9e16da65-eb57-4041-90f2-00243246dabc-kube-api-access-8b8f2\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.220556 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.257779 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.259965 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.290890 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.291731 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b8f2\" (UniqueName: \"kubernetes.io/projected/9e16da65-eb57-4041-90f2-00243246dabc-kube-api-access-8b8f2\") pod \"dnsmasq-dns-568df974c9-vxghc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") " pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.291946 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5bcj\" 
(UniqueName: \"kubernetes.io/projected/6832a35d-3728-4d58-9960-96044664057b-kube-api-access-g5bcj\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331527 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vwc6\" (UniqueName: \"kubernetes.io/projected/5eebd755-02f3-4d5b-8658-9620128db59c-kube-api-access-8vwc6\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331592 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-scripts\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331712 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data-custom\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331735 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331772 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5eebd755-02f3-4d5b-8658-9620128db59c-logs\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331818 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eebd755-02f3-4d5b-8658-9620128db59c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.331902 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.335572 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.359230 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.395555 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-569d7975c-rjrk8"] Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433463 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eebd755-02f3-4d5b-8658-9620128db59c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433600 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433658 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vwc6\" (UniqueName: \"kubernetes.io/projected/5eebd755-02f3-4d5b-8658-9620128db59c-kube-api-access-8vwc6\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433684 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-scripts\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433750 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data-custom\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433771 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.433802 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5eebd755-02f3-4d5b-8658-9620128db59c-logs\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.434388 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5eebd755-02f3-4d5b-8658-9620128db59c-logs\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.434453 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eebd755-02f3-4d5b-8658-9620128db59c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.450303 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data-custom\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.450885 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.453415 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.454429 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-scripts\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.465068 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vwc6\" (UniqueName: \"kubernetes.io/projected/5eebd755-02f3-4d5b-8658-9620128db59c-kube-api-access-8vwc6\") pod \"cinder-api-0\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.479874 4848 generic.go:334] "Generic (PLEG): container finished" podID="ead300c1-4c31-4c66-91a5-ac7609850be6" containerID="6d4e694dcefeade65c2785de94b550677800bcad2e4bdaaa95f033ed94ed8903" exitCode=0 Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.479976 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" event={"ID":"ead300c1-4c31-4c66-91a5-ac7609850be6","Type":"ContainerDied","Data":"6d4e694dcefeade65c2785de94b550677800bcad2e4bdaaa95f033ed94ed8903"} Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.499688 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2c33d357-d7c0-4239-a58e-d882b915fafb","Type":"ContainerStarted","Data":"524e15d9f056584738036f955a24b016b56d12e54b556221ff3d39da66bb0cff"} Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.499771 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2c33d357-d7c0-4239-a58e-d882b915fafb","Type":"ContainerStarted","Data":"742602c8cd0eaa8cefed00f7ad64bfa51e0ed1f4a025d7f6f8005391f31ee986"} Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.503799 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.531398 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2c33d357-d7c0-4239-a58e-d882b915fafb" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.174:9322/\": dial tcp 10.217.0.174:9322: connect: connection refused" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.551379 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.551345178 podStartE2EDuration="3.551345178s" podCreationTimestamp="2026-01-28 13:08:21 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:24.524132547 +0000 UTC m=+1331.436349585" watchObservedRunningTime="2026-01-28 13:08:24.551345178 +0000 UTC m=+1331.463562216" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.557463 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.561698 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67d5988776-4bwdg" event={"ID":"5ac34887-0b39-4669-a00c-c40b4a5f5c1a","Type":"ContainerStarted","Data":"90cad634593a189984f808c2b97d3b77e7b25ecaf5e61f2c0591d941514cea8d"} Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.562725 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-67d5988776-4bwdg" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.562853 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-67d5988776-4bwdg" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.567824 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.578995 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.588845 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" event={"ID":"4d9e697a-2a27-44ad-b426-2cb40cddadea","Type":"ContainerStarted","Data":"215343412c6349f837ab98c9b25f3b6a7417c316325635f2572f1e4c681896cc"} Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.601282 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.646233 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-config\") pod \"ead300c1-4c31-4c66-91a5-ac7609850be6\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.646728 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-swift-storage-0\") pod \"ead300c1-4c31-4c66-91a5-ac7609850be6\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.646892 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-nb\") pod \"ead300c1-4c31-4c66-91a5-ac7609850be6\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.647004 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-svc\") pod \"ead300c1-4c31-4c66-91a5-ac7609850be6\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.647615 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvbvl\" (UniqueName: \"kubernetes.io/projected/ead300c1-4c31-4c66-91a5-ac7609850be6-kube-api-access-kvbvl\") pod \"ead300c1-4c31-4c66-91a5-ac7609850be6\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.647685 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-sb\") pod \"ead300c1-4c31-4c66-91a5-ac7609850be6\" (UID: \"ead300c1-4c31-4c66-91a5-ac7609850be6\") " Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.691508 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ead300c1-4c31-4c66-91a5-ac7609850be6" (UID: "ead300c1-4c31-4c66-91a5-ac7609850be6"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.695454 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-67d5988776-4bwdg" podStartSLOduration=3.695430778 podStartE2EDuration="3.695430778s" podCreationTimestamp="2026-01-28 13:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:24.612864557 +0000 UTC m=+1331.525081595" watchObservedRunningTime="2026-01-28 13:08:24.695430778 +0000 UTC m=+1331.607647816" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.697452 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ead300c1-4c31-4c66-91a5-ac7609850be6-kube-api-access-kvbvl" (OuterVolumeSpecName: "kube-api-access-kvbvl") pod "ead300c1-4c31-4c66-91a5-ac7609850be6" (UID: "ead300c1-4c31-4c66-91a5-ac7609850be6"). InnerVolumeSpecName "kube-api-access-kvbvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.710223 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ead300c1-4c31-4c66-91a5-ac7609850be6" (UID: "ead300c1-4c31-4c66-91a5-ac7609850be6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.745632 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-config" (OuterVolumeSpecName: "config") pod "ead300c1-4c31-4c66-91a5-ac7609850be6" (UID: "ead300c1-4c31-4c66-91a5-ac7609850be6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.766206 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.766287 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvbvl\" (UniqueName: \"kubernetes.io/projected/ead300c1-4c31-4c66-91a5-ac7609850be6-kube-api-access-kvbvl\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.766306 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.766318 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.787154 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ead300c1-4c31-4c66-91a5-ac7609850be6" (UID: "ead300c1-4c31-4c66-91a5-ac7609850be6"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.788702 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ead300c1-4c31-4c66-91a5-ac7609850be6" (UID: "ead300c1-4c31-4c66-91a5-ac7609850be6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.868891 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:24 crc kubenswrapper[4848]: I0128 13:08:24.868936 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ead300c1-4c31-4c66-91a5-ac7609850be6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.052491 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.442920 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.609051 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" event={"ID":"ead300c1-4c31-4c66-91a5-ac7609850be6","Type":"ContainerDied","Data":"c3a6764ab87409552bf974a0be53227b314095b9e158ce5309c8f7890583ecdb"} Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.609150 4848 scope.go:117] "RemoveContainer" containerID="6d4e694dcefeade65c2785de94b550677800bcad2e4bdaaa95f033ed94ed8903" Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.609185 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586f4f6dc5-xc8tj" Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.702308 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586f4f6dc5-xc8tj"] Jan 28 13:08:25 crc kubenswrapper[4848]: I0128 13:08:25.713267 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586f4f6dc5-xc8tj"] Jan 28 13:08:26 crc kubenswrapper[4848]: I0128 13:08:26.548529 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 13:08:26 crc kubenswrapper[4848]: I0128 13:08:26.587010 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68f5655b9d-76qsp" podUID="dfa56dc1-1635-454c-95e0-74fdedcf8b00" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 13:08:26 crc kubenswrapper[4848]: W0128 13:08:26.591186 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2c3ecea_44d3_406d_b40a_b1d4515e5764.slice/crio-6bb43f3aea45a7a30d306eb78ee0342b5a7c063811923708c89ac5e3f92de2d6 WatchSource:0}: Error finding container 6bb43f3aea45a7a30d306eb78ee0342b5a7c063811923708c89ac5e3f92de2d6: Status 404 returned error can't find the container with id 6bb43f3aea45a7a30d306eb78ee0342b5a7c063811923708c89ac5e3f92de2d6 Jan 28 13:08:26 crc kubenswrapper[4848]: I0128 13:08:26.621980 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b2c3ecea-44d3-406d-b40a-b1d4515e5764","Type":"ContainerStarted","Data":"6bb43f3aea45a7a30d306eb78ee0342b5a7c063811923708c89ac5e3f92de2d6"} Jan 28 13:08:26 crc kubenswrapper[4848]: I0128 13:08:26.624778 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ffe5c892-4aa9-4e59-9b91-84f8728ece0c","Type":"ContainerStarted","Data":"cbb6cc0eb6a62767af98c5c6c2334f2fb336c6c2b70a7b01c2e5e9cb9c931dd0"} Jan 28 13:08:26 crc kubenswrapper[4848]: I0128 13:08:26.863114 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ead300c1-4c31-4c66-91a5-ac7609850be6" path="/var/lib/kubelet/pods/ead300c1-4c31-4c66-91a5-ac7609850be6/volumes" Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.048314 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.689383 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" event={"ID":"4d9e697a-2a27-44ad-b426-2cb40cddadea","Type":"ContainerStarted","Data":"13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4"} Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.690405 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" podUID="4d9e697a-2a27-44ad-b426-2cb40cddadea" containerName="init" containerID="cri-o://13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4" gracePeriod=10 Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.830147 4848 generic.go:334] "Generic 
(PLEG): container finished" podID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerID="9b5f576212402a219c02a2fc8cb5e5921b83df0b4d58e213d3d6eb28e9e919ac" exitCode=0 Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.830358 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.831768 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerDied","Data":"9b5f576212402a219c02a2fc8cb5e5921b83df0b4d58e213d3d6eb28e9e919ac"} Jan 28 13:08:27 crc kubenswrapper[4848]: I0128 13:08:27.851267 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.093951 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.141952 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.204090 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-log-httpd\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.204312 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-combined-ca-bundle\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.204434 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-config-data\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.204469 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-sg-core-conf-yaml\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.204540 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-scripts\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.204804 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plhf6\" (UniqueName: \"kubernetes.io/projected/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-kube-api-access-plhf6\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.205682 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-run-httpd\") pod \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\" (UID: \"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff\") " Jan 28 
13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.215533 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.215682 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.231153 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-scripts" (OuterVolumeSpecName: "scripts") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.231304 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-kube-api-access-plhf6" (OuterVolumeSpecName: "kube-api-access-plhf6") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "kube-api-access-plhf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.275534 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.279415 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568df974c9-vxghc"] Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.315573 4848 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.315615 4848 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.315628 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.315640 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plhf6\" (UniqueName: \"kubernetes.io/projected/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-kube-api-access-plhf6\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.315656 4848 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.398627 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-config-data" (OuterVolumeSpecName: "config-data") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.418843 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.430782 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" (UID: "b0ac205d-682b-4cb7-b5eb-05a9b1f710ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.520658 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.523233 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.622165 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vjmq\" (UniqueName: \"kubernetes.io/projected/4d9e697a-2a27-44ad-b426-2cb40cddadea-kube-api-access-9vjmq\") pod \"4d9e697a-2a27-44ad-b426-2cb40cddadea\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.622256 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-nb\") pod \"4d9e697a-2a27-44ad-b426-2cb40cddadea\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.622296 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-sb\") pod \"4d9e697a-2a27-44ad-b426-2cb40cddadea\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.622462 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-swift-storage-0\") pod \"4d9e697a-2a27-44ad-b426-2cb40cddadea\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.622510 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-config\") pod \"4d9e697a-2a27-44ad-b426-2cb40cddadea\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.622589 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-svc\") pod \"4d9e697a-2a27-44ad-b426-2cb40cddadea\" (UID: \"4d9e697a-2a27-44ad-b426-2cb40cddadea\") " Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.643668 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d9e697a-2a27-44ad-b426-2cb40cddadea-kube-api-access-9vjmq" (OuterVolumeSpecName: "kube-api-access-9vjmq") pod "4d9e697a-2a27-44ad-b426-2cb40cddadea" (UID: "4d9e697a-2a27-44ad-b426-2cb40cddadea"). InnerVolumeSpecName "kube-api-access-9vjmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.725361 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vjmq\" (UniqueName: \"kubernetes.io/projected/4d9e697a-2a27-44ad-b426-2cb40cddadea-kube-api-access-9vjmq\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.783172 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4d9e697a-2a27-44ad-b426-2cb40cddadea" (UID: "4d9e697a-2a27-44ad-b426-2cb40cddadea"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.827795 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.839379 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d9e697a-2a27-44ad-b426-2cb40cddadea" (UID: "4d9e697a-2a27-44ad-b426-2cb40cddadea"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.877798 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4d9e697a-2a27-44ad-b426-2cb40cddadea" (UID: "4d9e697a-2a27-44ad-b426-2cb40cddadea"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.878752 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-config" (OuterVolumeSpecName: "config") pod "4d9e697a-2a27-44ad-b426-2cb40cddadea" (UID: "4d9e697a-2a27-44ad-b426-2cb40cddadea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.879304 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4d9e697a-2a27-44ad-b426-2cb40cddadea" (UID: "4d9e697a-2a27-44ad-b426-2cb40cddadea"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.923627 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6832a35d-3728-4d58-9960-96044664057b","Type":"ContainerStarted","Data":"ebca748a724515015996c9ec75d56e70851b4f24fafaed1a3685d0e4fd52dc16"} Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.932174 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568df974c9-vxghc" event={"ID":"9e16da65-eb57-4041-90f2-00243246dabc","Type":"ContainerStarted","Data":"677a72c01d91c876b5357f78fedb7c1ab2373790b642002d88b64dbfb5307337"} Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.939993 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5" event={"ID":"7a72021f-6e14-4681-b127-7c85be7c597c","Type":"ContainerStarted","Data":"fb5c9058c6162186f59551463e1f0dac6a554d2357caf7d80f9b479b0fef8896"} Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.944883 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.944934 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.944945 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.944956 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d9e697a-2a27-44ad-b426-2cb40cddadea-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.961342 4848 generic.go:334] "Generic (PLEG): container finished" podID="4d9e697a-2a27-44ad-b426-2cb40cddadea" containerID="13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4" exitCode=0 Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.961612 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.966821 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" event={"ID":"4d9e697a-2a27-44ad-b426-2cb40cddadea","Type":"ContainerDied","Data":"13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4"} Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.966905 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-569d7975c-rjrk8" event={"ID":"4d9e697a-2a27-44ad-b426-2cb40cddadea","Type":"ContainerDied","Data":"215343412c6349f837ab98c9b25f3b6a7417c316325635f2572f1e4c681896cc"} Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.966935 4848 scope.go:117] "RemoveContainer" containerID="13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4" Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.992999 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5eebd755-02f3-4d5b-8658-9620128db59c","Type":"ContainerStarted","Data":"c8e7c85ee66d6e1a1677222adc8e5e4e89a7f0e67b7aa7e51ebb2812f461f15a"} Jan 28 13:08:28 crc kubenswrapper[4848]: I0128 13:08:28.999585 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bd96c8879-gdtwm" event={"ID":"a56e7c4e-4ce2-4742-8645-6201f8c957f7","Type":"ContainerStarted","Data":"6d82c307adab61173c0339334ac088f54bdd9712921883c2cf096847db23d3d7"} Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.030646 4848 generic.go:334] "Generic (PLEG): container finished" podID="88151fad-4442-4d32-a675-f89f070ed086" containerID="3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c" exitCode=1 Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.030743 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerDied","Data":"3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c"} Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.035081 4848 scope.go:117] "RemoveContainer" containerID="3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c" Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.036040 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(88151fad-4442-4d32-a675-f89f070ed086)\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.039520 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-bd96c8879-gdtwm" podStartSLOduration=4.187986472 podStartE2EDuration="9.03949164s" podCreationTimestamp="2026-01-28 13:08:20 +0000 UTC" firstStartedPulling="2026-01-28 13:08:22.314179842 +0000 UTC m=+1329.226396880" lastFinishedPulling="2026-01-28 13:08:27.16568501 +0000 UTC m=+1334.077902048" observedRunningTime="2026-01-28 13:08:29.020861326 +0000 UTC m=+1335.933078364" watchObservedRunningTime="2026-01-28 13:08:29.03949164 +0000 UTC m=+1335.951708678" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.063523 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b0ac205d-682b-4cb7-b5eb-05a9b1f710ff","Type":"ContainerDied","Data":"2883f3b9a69a8ab9b1ac531683d5ed593037110a08bb6b001cf8859154bc7eb8"} Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.063739 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.176054 4848 scope.go:117] "RemoveContainer" containerID="13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4" Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.178477 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4\": container with ID starting with 13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4 not found: ID does not exist" containerID="13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.178545 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4"} err="failed to get container status \"13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4\": rpc error: code = NotFound desc = could not find container \"13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4\": container with ID starting with 13decd47a3e3bf12971c696c2b7a9893f6158e3d3a4d6b7049c3e7910c1ab2c4 not found: ID does not exist" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.178584 4848 scope.go:117] "RemoveContainer" containerID="001771066b162018218fe226fbe7ad7fc7b182a456cc24588b12072a66c88b2a" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.272681 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.308392 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.316315 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.316380 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.321938 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.322567 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ead300c1-4c31-4c66-91a5-ac7609850be6" containerName="init" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322582 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ead300c1-4c31-4c66-91a5-ac7609850be6" containerName="init" Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.322604 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="sg-core" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322611 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="sg-core" Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.322633 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="proxy-httpd" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 
13:08:29.322639 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="proxy-httpd" Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.322652 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d9e697a-2a27-44ad-b426-2cb40cddadea" containerName="init" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322658 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d9e697a-2a27-44ad-b426-2cb40cddadea" containerName="init" Jan 28 13:08:29 crc kubenswrapper[4848]: E0128 13:08:29.322675 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="ceilometer-notification-agent" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322681 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="ceilometer-notification-agent" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322895 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="ceilometer-notification-agent" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322912 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="proxy-httpd" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322927 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ead300c1-4c31-4c66-91a5-ac7609850be6" containerName="init" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322937 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d9e697a-2a27-44ad-b426-2cb40cddadea" containerName="init" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.322944 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" containerName="sg-core" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.325023 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.331655 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.331954 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.378148 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.387880 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-569d7975c-rjrk8"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.411592 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.435427 4848 scope.go:117] "RemoveContainer" containerID="4a626b3dc7a8d703278a77841e50e1667868bcc46d0844b23a9776bdee530558" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.451032 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.467678 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-569d7975c-rjrk8"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480063 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-config-data\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480155 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k56gw\" (UniqueName: \"kubernetes.io/projected/a2907bca-1e85-4d6e-a716-a495b7affeaa-kube-api-access-k56gw\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480306 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480358 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-scripts\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480411 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-log-httpd\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480439 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-run-httpd\") pod \"ceilometer-0\" (UID: 
\"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.480481 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.572337 4848 scope.go:117] "RemoveContainer" containerID="dfb5932cc98a5f499a97317d3e120a3e94e9600f558651bce8cad150d220c057" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.590873 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-config-data\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.590955 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k56gw\" (UniqueName: \"kubernetes.io/projected/a2907bca-1e85-4d6e-a716-a495b7affeaa-kube-api-access-k56gw\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.591110 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.591160 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-scripts\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.591224 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-log-httpd\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.591280 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-run-httpd\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.591326 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.591997 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-log-httpd\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.597746 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-scripts\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.597793 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-config-data\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.598310 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-run-httpd\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.612854 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.616464 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.667068 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k56gw\" (UniqueName: \"kubernetes.io/projected/a2907bca-1e85-4d6e-a716-a495b7affeaa-kube-api-access-k56gw\") pod \"ceilometer-0\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.678471 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.690878 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.860575 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.895435 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.898163 4848 scope.go:117] "RemoveContainer" containerID="9b5f576212402a219c02a2fc8cb5e5921b83df0b4d58e213d3d6eb28e9e919ac" Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.929298 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-988d7f849-l8xdb"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.929605 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-988d7f849-l8xdb" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-api" containerID="cri-o://e37496aa995c774aa528b58d0551e096b7b42b5a7f81dfe53a540ad71e104029" gracePeriod=30 Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.929758 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-988d7f849-l8xdb" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-httpd" containerID="cri-o://9398e20b03b8ae080f593bd5259c6dc97990067a5daec6123799379e953cdc10" gracePeriod=30 Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.991170 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6997cd7cdf-nf254"] Jan 28 13:08:29 crc kubenswrapper[4848]: I0128 13:08:29.993211 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.008485 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6997cd7cdf-nf254"] Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.119733 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-internal-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.119901 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-config\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.119940 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-public-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.120076 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-ovndb-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.120135 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-httpd-config\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.120163 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-combined-ca-bundle\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.120259 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrg8d\" (UniqueName: \"kubernetes.io/projected/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-kube-api-access-mrg8d\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.193340 4848 scope.go:117] "RemoveContainer" containerID="3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c" Jan 28 13:08:30 crc kubenswrapper[4848]: E0128 13:08:30.193615 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine 
pod=watcher-decision-engine-0_openstack(88151fad-4442-4d32-a675-f89f070ed086)\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.225925 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-internal-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.226348 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-config\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.226386 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-public-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.226466 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-ovndb-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.226528 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-httpd-config\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.226552 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-combined-ca-bundle\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.226625 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrg8d\" (UniqueName: \"kubernetes.io/projected/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-kube-api-access-mrg8d\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.236015 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-combined-ca-bundle\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.243365 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-ovndb-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " 
pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.243365 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-httpd-config\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.243704 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-config\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.244304 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-internal-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.255932 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-public-tls-certs\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.259642 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b2c3ecea-44d3-406d-b40a-b1d4515e5764","Type":"ContainerStarted","Data":"28bf47550d47c6dd58fe91c37f0fc251164e4250d0b3ee246f0f0fff62969ac8"} Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.261887 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrg8d\" (UniqueName: \"kubernetes.io/projected/ac7966e3-99c4-4e7c-b2d6-7229c78ca5db-kube-api-access-mrg8d\") pod \"neutron-6997cd7cdf-nf254\" (UID: \"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db\") " pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.311012 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5" event={"ID":"7a72021f-6e14-4681-b127-7c85be7c597c","Type":"ContainerStarted","Data":"dd593ed277c2321ad29c25de8b456f6140c3076eb7f0b55e64a621ffe14aea86"} Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.323807 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5eebd755-02f3-4d5b-8658-9620128db59c","Type":"ContainerStarted","Data":"8009cb77068167102fdbe0b9dcd35b5805bf6f3b97352e51f76e770bac818737"} Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.333985 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-988d7f849-l8xdb" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.167:9696/\": read tcp 10.217.0.2:54006->10.217.0.167:9696: read: connection reset by peer" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.336915 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5cb69d9f6b-f9ck5" podStartSLOduration=5.50199849 podStartE2EDuration="10.33689134s" podCreationTimestamp="2026-01-28 13:08:20 +0000 UTC" 
firstStartedPulling="2026-01-28 13:08:22.315076607 +0000 UTC m=+1329.227293645" lastFinishedPulling="2026-01-28 13:08:27.149969457 +0000 UTC m=+1334.062186495" observedRunningTime="2026-01-28 13:08:30.334397722 +0000 UTC m=+1337.246614760" watchObservedRunningTime="2026-01-28 13:08:30.33689134 +0000 UTC m=+1337.249108378" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.346299 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bd96c8879-gdtwm" event={"ID":"a56e7c4e-4ce2-4742-8645-6201f8c957f7","Type":"ContainerStarted","Data":"72e8a1a88ed11733ef84d35f0d1f90731e2e554e18ff30a4742760a3d238f428"} Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.381721 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.464774 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6832a35d-3728-4d58-9960-96044664057b","Type":"ContainerStarted","Data":"8dbdfe99eb7840d0e6007501ccfd03e7f0a273c85743078be2c1ccb229be2514"} Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.499952 4848 generic.go:334] "Generic (PLEG): container finished" podID="9e16da65-eb57-4041-90f2-00243246dabc" containerID="af598574efbb1972543573cfc16add6033cdd0aabae6d503b844c928a50bf80b" exitCode=0 Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.500537 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568df974c9-vxghc" event={"ID":"9e16da65-eb57-4041-90f2-00243246dabc","Type":"ContainerDied","Data":"af598574efbb1972543573cfc16add6033cdd0aabae6d503b844c928a50bf80b"} Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.764026 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:08:30 crc kubenswrapper[4848]: W0128 13:08:30.826286 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2907bca_1e85_4d6e_a716_a495b7affeaa.slice/crio-546db06f5c37572d1b47a875b4ace2b0abc3fd93a43020a3662e87029c182b9c WatchSource:0}: Error finding container 546db06f5c37572d1b47a875b4ace2b0abc3fd93a43020a3662e87029c182b9c: Status 404 returned error can't find the container with id 546db06f5c37572d1b47a875b4ace2b0abc3fd93a43020a3662e87029c182b9c Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.870768 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d9e697a-2a27-44ad-b426-2cb40cddadea" path="/var/lib/kubelet/pods/4d9e697a-2a27-44ad-b426-2cb40cddadea/volumes" Jan 28 13:08:30 crc kubenswrapper[4848]: I0128 13:08:30.871411 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0ac205d-682b-4cb7-b5eb-05a9b1f710ff" path="/var/lib/kubelet/pods/b0ac205d-682b-4cb7-b5eb-05a9b1f710ff/volumes" Jan 28 13:08:31 crc kubenswrapper[4848]: I0128 13:08:31.217688 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6997cd7cdf-nf254"] Jan 28 13:08:31 crc kubenswrapper[4848]: W0128 13:08:31.245997 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac7966e3_99c4_4e7c_b2d6_7229c78ca5db.slice/crio-d7be2fa9ee724d49c34189c5d8dbf4f1ac8ddf6d76f23d0b9be299aab3bd80d0 WatchSource:0}: Error finding container d7be2fa9ee724d49c34189c5d8dbf4f1ac8ddf6d76f23d0b9be299aab3bd80d0: Status 404 returned error can't find the container with id 
d7be2fa9ee724d49c34189c5d8dbf4f1ac8ddf6d76f23d0b9be299aab3bd80d0 Jan 28 13:08:31 crc kubenswrapper[4848]: I0128 13:08:31.515224 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerStarted","Data":"546db06f5c37572d1b47a875b4ace2b0abc3fd93a43020a3662e87029c182b9c"} Jan 28 13:08:31 crc kubenswrapper[4848]: I0128 13:08:31.518610 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6997cd7cdf-nf254" event={"ID":"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db","Type":"ContainerStarted","Data":"d7be2fa9ee724d49c34189c5d8dbf4f1ac8ddf6d76f23d0b9be299aab3bd80d0"} Jan 28 13:08:31 crc kubenswrapper[4848]: I0128 13:08:31.529097 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ffe5c892-4aa9-4e59-9b91-84f8728ece0c","Type":"ContainerStarted","Data":"6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08"} Jan 28 13:08:31 crc kubenswrapper[4848]: I0128 13:08:31.655020 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-988d7f849-l8xdb" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.167:9696/\": dial tcp 10.217.0.167:9696: connect: connection refused" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.036064 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-669cc887b-rnh7b"] Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.061181 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.061645 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.072618 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.073868 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.094047 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-669cc887b-rnh7b"] Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.170235 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-combined-ca-bundle\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.170803 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-public-tls-certs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.170875 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-config-data\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.170902 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cjjf\" (UniqueName: \"kubernetes.io/projected/feba4e43-dc8d-455e-a760-82f68f781511-kube-api-access-7cjjf\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.170941 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-config-data-custom\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.170979 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-internal-tls-certs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.171036 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feba4e43-dc8d-455e-a760-82f68f781511-logs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.275901 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-config-data\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.275983 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cjjf\" (UniqueName: \"kubernetes.io/projected/feba4e43-dc8d-455e-a760-82f68f781511-kube-api-access-7cjjf\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.276042 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-config-data-custom\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.276097 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-internal-tls-certs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.276154 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feba4e43-dc8d-455e-a760-82f68f781511-logs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.276270 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-combined-ca-bundle\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.276333 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-public-tls-certs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.278810 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feba4e43-dc8d-455e-a760-82f68f781511-logs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.308819 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cjjf\" (UniqueName: \"kubernetes.io/projected/feba4e43-dc8d-455e-a760-82f68f781511-kube-api-access-7cjjf\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.309597 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-config-data-custom\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.309734 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-internal-tls-certs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.311057 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-public-tls-certs\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.311767 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.311986 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-combined-ca-bundle\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.312462 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feba4e43-dc8d-455e-a760-82f68f781511-config-data\") pod \"barbican-api-669cc887b-rnh7b\" (UID: \"feba4e43-dc8d-455e-a760-82f68f781511\") " pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.486859 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.608697 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api-log" containerID="cri-o://8009cb77068167102fdbe0b9dcd35b5805bf6f3b97352e51f76e770bac818737" gracePeriod=30 Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.609120 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.609571 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api" containerID="cri-o://7b190a1936b18e6a0594ad7d98ae8d062176ac47c0ab2fbc8c5d65c06ec0c0d9" gracePeriod=30 Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.637884 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=8.637858089 podStartE2EDuration="8.637858089s" podCreationTimestamp="2026-01-28 13:08:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:32.629376574 +0000 UTC m=+1339.541593602" watchObservedRunningTime="2026-01-28 13:08:32.637858089 +0000 UTC m=+1339.550075127" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.640745 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6832a35d-3728-4d58-9960-96044664057b","Type":"ContainerStarted","Data":"3364f35f7251e65babcc5729255091d4ca53cd08834412ad0965ac45d1a74400"} Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.640952 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-log" containerID="cri-o://8dbdfe99eb7840d0e6007501ccfd03e7f0a273c85743078be2c1ccb229be2514" gracePeriod=30 Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.641618 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-httpd" containerID="cri-o://3364f35f7251e65babcc5729255091d4ca53cd08834412ad0965ac45d1a74400" gracePeriod=30 Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.662538 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b2c3ecea-44d3-406d-b40a-b1d4515e5764","Type":"ContainerStarted","Data":"1a15a5f3c078457dd662815ad13c74dd0b5bb68ea375ecd6282c9494f791d303"} Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.662782 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-log" containerID="cri-o://28bf47550d47c6dd58fe91c37f0fc251164e4250d0b3ee246f0f0fff62969ac8" gracePeriod=30 Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.663144 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-httpd" containerID="cri-o://1a15a5f3c078457dd662815ad13c74dd0b5bb68ea375ecd6282c9494f791d303" gracePeriod=30 Jan 28 
13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.679824 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.679798447 podStartE2EDuration="10.679798447s" podCreationTimestamp="2026-01-28 13:08:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:32.673277736 +0000 UTC m=+1339.585494804" watchObservedRunningTime="2026-01-28 13:08:32.679798447 +0000 UTC m=+1339.592015485" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.697949 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6997cd7cdf-nf254" event={"ID":"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db","Type":"ContainerStarted","Data":"5e5fba5576a601dc07aa5a9ded960c09d4757f2e4098564ec69e321d8e0c6493"} Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.698023 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6997cd7cdf-nf254" event={"ID":"ac7966e3-99c4-4e7c-b2d6-7229c78ca5db","Type":"ContainerStarted","Data":"43e779063607fb043af0dcba04fcaca4fcd47b15b037d4841fe47773e30d0cac"} Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.698370 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.710803 4848 generic.go:334] "Generic (PLEG): container finished" podID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerID="9398e20b03b8ae080f593bd5259c6dc97990067a5daec6123799379e953cdc10" exitCode=0 Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.710925 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-988d7f849-l8xdb" event={"ID":"6964a7ca-3376-4df2-8a5d-bb63e731b0a8","Type":"ContainerDied","Data":"9398e20b03b8ae080f593bd5259c6dc97990067a5daec6123799379e953cdc10"} Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.725996 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.722938498 podStartE2EDuration="10.722938498s" podCreationTimestamp="2026-01-28 13:08:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:32.719834603 +0000 UTC m=+1339.632051641" watchObservedRunningTime="2026-01-28 13:08:32.722938498 +0000 UTC m=+1339.635155536" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.736331 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568df974c9-vxghc" event={"ID":"9e16da65-eb57-4041-90f2-00243246dabc","Type":"ContainerStarted","Data":"0f31e1333be2ad20c0ef1291d9bf9da5a5196adaaef2223b664eca73dd619598"} Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.736379 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.778593 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.814442 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6997cd7cdf-nf254" podStartSLOduration=3.814408785 podStartE2EDuration="3.814408785s" podCreationTimestamp="2026-01-28 13:08:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2026-01-28 13:08:32.754997414 +0000 UTC m=+1339.667214462" watchObservedRunningTime="2026-01-28 13:08:32.814408785 +0000 UTC m=+1339.726625833" Jan 28 13:08:32 crc kubenswrapper[4848]: I0128 13:08:32.898415 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-568df974c9-vxghc" podStartSLOduration=9.898352923000001 podStartE2EDuration="9.898352923s" podCreationTimestamp="2026-01-28 13:08:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:32.804553062 +0000 UTC m=+1339.716770100" watchObservedRunningTime="2026-01-28 13:08:32.898352923 +0000 UTC m=+1339.810569961" Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.064340 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.569350 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-669cc887b-rnh7b"] Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.819817 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ffe5c892-4aa9-4e59-9b91-84f8728ece0c","Type":"ContainerStarted","Data":"35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.840097 4848 generic.go:334] "Generic (PLEG): container finished" podID="5eebd755-02f3-4d5b-8658-9620128db59c" containerID="8009cb77068167102fdbe0b9dcd35b5805bf6f3b97352e51f76e770bac818737" exitCode=143 Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.840204 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5eebd755-02f3-4d5b-8658-9620128db59c","Type":"ContainerStarted","Data":"7b190a1936b18e6a0594ad7d98ae8d062176ac47c0ab2fbc8c5d65c06ec0c0d9"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.840241 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5eebd755-02f3-4d5b-8658-9620128db59c","Type":"ContainerDied","Data":"8009cb77068167102fdbe0b9dcd35b5805bf6f3b97352e51f76e770bac818737"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.856615 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-669cc887b-rnh7b" event={"ID":"feba4e43-dc8d-455e-a760-82f68f781511","Type":"ContainerStarted","Data":"b8cdb0dad004c8e7e3bb9612cff85117ca3943de990e9ec1acd497b95b2a76ca"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.860769 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=9.386992661 podStartE2EDuration="10.860742152s" podCreationTimestamp="2026-01-28 13:08:23 +0000 UTC" firstStartedPulling="2026-01-28 13:08:26.57217303 +0000 UTC m=+1333.484390068" lastFinishedPulling="2026-01-28 13:08:28.045922531 +0000 UTC m=+1334.958139559" observedRunningTime="2026-01-28 13:08:33.856914626 +0000 UTC m=+1340.769131694" watchObservedRunningTime="2026-01-28 13:08:33.860742152 +0000 UTC m=+1340.772959190" Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.870685 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerStarted","Data":"eae12cf9ef61d4111d9f62ec6e8852eb5190411ee72d701659b19bb36c5db262"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.905806 4848 generic.go:334] "Generic (PLEG): container finished" podID="6832a35d-3728-4d58-9960-96044664057b" containerID="3364f35f7251e65babcc5729255091d4ca53cd08834412ad0965ac45d1a74400" exitCode=143 Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.905859 4848 generic.go:334] "Generic (PLEG): container finished" podID="6832a35d-3728-4d58-9960-96044664057b" containerID="8dbdfe99eb7840d0e6007501ccfd03e7f0a273c85743078be2c1ccb229be2514" exitCode=143 Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.905951 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6832a35d-3728-4d58-9960-96044664057b","Type":"ContainerDied","Data":"3364f35f7251e65babcc5729255091d4ca53cd08834412ad0965ac45d1a74400"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.905988 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6832a35d-3728-4d58-9960-96044664057b","Type":"ContainerDied","Data":"8dbdfe99eb7840d0e6007501ccfd03e7f0a273c85743078be2c1ccb229be2514"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.914621 4848 generic.go:334] "Generic (PLEG): container finished" podID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerID="1a15a5f3c078457dd662815ad13c74dd0b5bb68ea375ecd6282c9494f791d303" exitCode=143 Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.914665 4848 generic.go:334] "Generic (PLEG): container finished" podID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerID="28bf47550d47c6dd58fe91c37f0fc251164e4250d0b3ee246f0f0fff62969ac8" exitCode=143 Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.915357 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b2c3ecea-44d3-406d-b40a-b1d4515e5764","Type":"ContainerDied","Data":"1a15a5f3c078457dd662815ad13c74dd0b5bb68ea375ecd6282c9494f791d303"} Jan 28 13:08:33 crc kubenswrapper[4848]: I0128 13:08:33.915469 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b2c3ecea-44d3-406d-b40a-b1d4515e5764","Type":"ContainerDied","Data":"28bf47550d47c6dd58fe91c37f0fc251164e4250d0b3ee246f0f0fff62969ac8"} Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.151378 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.299727 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.303367 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.402935 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrwct\" (UniqueName: \"kubernetes.io/projected/b2c3ecea-44d3-406d-b40a-b1d4515e5764-kube-api-access-zrwct\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403036 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-httpd-run\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403156 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403215 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-logs\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403240 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403310 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-scripts\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403357 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-combined-ca-bundle\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403400 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-combined-ca-bundle\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403458 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-logs\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403506 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-scripts\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403544 4848 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5bcj\" (UniqueName: \"kubernetes.io/projected/6832a35d-3728-4d58-9960-96044664057b-kube-api-access-g5bcj\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403626 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-config-data\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403679 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-config-data\") pod \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\" (UID: \"b2c3ecea-44d3-406d-b40a-b1d4515e5764\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.403910 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-httpd-run\") pod \"6832a35d-3728-4d58-9960-96044664057b\" (UID: \"6832a35d-3728-4d58-9960-96044664057b\") " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.405575 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.411566 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-logs" (OuterVolumeSpecName: "logs") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.411670 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.416008 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-logs" (OuterVolumeSpecName: "logs") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.418479 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.455165 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6832a35d-3728-4d58-9960-96044664057b-kube-api-access-g5bcj" (OuterVolumeSpecName: "kube-api-access-g5bcj") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "kube-api-access-g5bcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.466463 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-scripts" (OuterVolumeSpecName: "scripts") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.476461 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-scripts" (OuterVolumeSpecName: "scripts") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.479591 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.482953 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2c3ecea-44d3-406d-b40a-b1d4515e5764-kube-api-access-zrwct" (OuterVolumeSpecName: "kube-api-access-zrwct") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "kube-api-access-zrwct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507001 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507095 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507110 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507119 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507131 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507139 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507148 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5bcj\" (UniqueName: \"kubernetes.io/projected/6832a35d-3728-4d58-9960-96044664057b-kube-api-access-g5bcj\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507157 4848 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6832a35d-3728-4d58-9960-96044664057b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507165 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrwct\" (UniqueName: \"kubernetes.io/projected/b2c3ecea-44d3-406d-b40a-b1d4515e5764-kube-api-access-zrwct\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.507173 4848 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2c3ecea-44d3-406d-b40a-b1d4515e5764-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.588740 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.604663 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.610154 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.610203 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") 
on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.635508 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-config-data" (OuterVolumeSpecName: "config-data") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.645516 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.713148 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.713468 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.719329 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-config-data" (OuterVolumeSpecName: "config-data") pod "b2c3ecea-44d3-406d-b40a-b1d4515e5764" (UID: "b2c3ecea-44d3-406d-b40a-b1d4515e5764"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.741119 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6832a35d-3728-4d58-9960-96044664057b" (UID: "6832a35d-3728-4d58-9960-96044664057b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.815755 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6832a35d-3728-4d58-9960-96044664057b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.815796 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2c3ecea-44d3-406d-b40a-b1d4515e5764-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.944831 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b2c3ecea-44d3-406d-b40a-b1d4515e5764","Type":"ContainerDied","Data":"6bb43f3aea45a7a30d306eb78ee0342b5a7c063811923708c89ac5e3f92de2d6"} Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.944900 4848 scope.go:117] "RemoveContainer" containerID="1a15a5f3c078457dd662815ad13c74dd0b5bb68ea375ecd6282c9494f791d303" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.944900 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.980684 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-669cc887b-rnh7b" event={"ID":"feba4e43-dc8d-455e-a760-82f68f781511","Type":"ContainerStarted","Data":"07c7d0ebc2e352b95662b5df7de20448613fd985b6cc87a9fc3faf78c3314038"} Jan 28 13:08:34 crc kubenswrapper[4848]: I0128 13:08:34.992698 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerStarted","Data":"86eb3e747fc93c33d00d8e4d0356f1e4fa0c3ff7153d583760cd8d107f60fa53"} Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.031194 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.033033 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6832a35d-3728-4d58-9960-96044664057b","Type":"ContainerDied","Data":"ebca748a724515015996c9ec75d56e70851b4f24fafaed1a3685d0e4fd52dc16"} Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.055205 4848 scope.go:117] "RemoveContainer" containerID="28bf47550d47c6dd58fe91c37f0fc251164e4250d0b3ee246f0f0fff62969ac8" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.073132 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.188710 4848 scope.go:117] "RemoveContainer" containerID="3364f35f7251e65babcc5729255091d4ca53cd08834412ad0965ac45d1a74400" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.291074 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.291702 4848 scope.go:117] "RemoveContainer" containerID="8dbdfe99eb7840d0e6007501ccfd03e7f0a273c85743078be2c1ccb229be2514" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.332737 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: E0128 13:08:35.334573 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-log" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334589 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-log" Jan 28 13:08:35 crc kubenswrapper[4848]: E0128 13:08:35.334652 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-log" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334659 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-log" Jan 28 13:08:35 crc kubenswrapper[4848]: E0128 13:08:35.334684 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-httpd" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334692 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-httpd" Jan 28 13:08:35 crc kubenswrapper[4848]: E0128 13:08:35.334721 4848 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-httpd" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334727 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-httpd" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334899 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-httpd" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334907 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" containerName="glance-log" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334926 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-log" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.334943 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6832a35d-3728-4d58-9960-96044664057b" containerName="glance-httpd" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.336191 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.342906 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.343227 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.343448 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.343547 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c898f" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.398548 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.427341 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.439610 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.469229 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470420 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-logs\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470480 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470520 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-config-data\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470581 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-scripts\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470612 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470633 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470684 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.470743 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7pg6\" (UniqueName: \"kubernetes.io/projected/6562fac3-e8b3-409d-b81c-aba6bef140d4-kube-api-access-r7pg6\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.472517 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.477018 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.479736 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.480003 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-68f5655b9d-76qsp" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.498361 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573181 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7pg6\" (UniqueName: \"kubernetes.io/projected/6562fac3-e8b3-409d-b81c-aba6bef140d4-kube-api-access-r7pg6\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573316 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-logs\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573358 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573394 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573435 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573494 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-config-data\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573518 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 
13:08:35.573541 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgtlc\" (UniqueName: \"kubernetes.io/projected/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-kube-api-access-jgtlc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573568 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-logs\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573657 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573687 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-scripts\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573752 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573785 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573832 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573874 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.573957 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc 
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.574372 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.576917 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-logs\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.577535 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.595725 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7pg6\" (UniqueName: \"kubernetes.io/projected/6562fac3-e8b3-409d-b81c-aba6bef140d4-kube-api-access-r7pg6\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.600948 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-scripts\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.602007 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.607650 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-config-data\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.620155 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.648417 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " pod="openstack/glance-default-external-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.677827 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678007 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678048 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678104 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678130 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgtlc\" (UniqueName: \"kubernetes.io/projected/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-kube-api-access-jgtlc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678160 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-logs\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678309 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.678435 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.681844 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.681965 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-logs\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.682327 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.689430 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.693152 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.721516 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.724296 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgtlc\" (UniqueName: \"kubernetes.io/projected/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-kube-api-access-jgtlc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.731075 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0"
Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.765990 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.803927 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:08:35 crc kubenswrapper[4848]: I0128 13:08:35.902846 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-67d5988776-4bwdg" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.062308 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.062764 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-57844b64c8-6jpl8" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.083818 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerStarted","Data":"63d641aae9e0d10bd80aab7cc6d022a716db58cb6ff267e3897c628dc38106d5"} Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.105920 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.122698 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-669cc887b-rnh7b" event={"ID":"feba4e43-dc8d-455e-a760-82f68f781511","Type":"ContainerStarted","Data":"ea54553671a1256ba3e4c8fd36f78f7210e2a5a4b9439dd12da08084173185d4"} Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.122796 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.122839 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.203269 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-669cc887b-rnh7b" podStartSLOduration=5.203225297 podStartE2EDuration="5.203225297s" podCreationTimestamp="2026-01-28 13:08:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:36.151303282 +0000 UTC m=+1343.063520330" watchObservedRunningTime="2026-01-28 13:08:36.203225297 +0000 UTC m=+1343.115442335" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.565747 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:08:36 crc kubenswrapper[4848]: W0128 13:08:36.589731 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6562fac3_e8b3_409d_b81c_aba6bef140d4.slice/crio-d81ebaff6e78e8bf895bfc308968e139397a9ba65ad7ea5f8d99aac6765c8f35 WatchSource:0}: Error finding container d81ebaff6e78e8bf895bfc308968e139397a9ba65ad7ea5f8d99aac6765c8f35: Status 404 returned error can't find the container with id 
d81ebaff6e78e8bf895bfc308968e139397a9ba65ad7ea5f8d99aac6765c8f35 Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.874272 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6832a35d-3728-4d58-9960-96044664057b" path="/var/lib/kubelet/pods/6832a35d-3728-4d58-9960-96044664057b/volumes" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.875484 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2c3ecea-44d3-406d-b40a-b1d4515e5764" path="/var/lib/kubelet/pods/b2c3ecea-44d3-406d-b40a-b1d4515e5764/volumes" Jan 28 13:08:36 crc kubenswrapper[4848]: I0128 13:08:36.973226 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:08:37 crc kubenswrapper[4848]: I0128 13:08:37.068066 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:08:37 crc kubenswrapper[4848]: I0128 13:08:37.078896 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-67d5988776-4bwdg" Jan 28 13:08:37 crc kubenswrapper[4848]: I0128 13:08:37.192643 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6562fac3-e8b3-409d-b81c-aba6bef140d4","Type":"ContainerStarted","Data":"d81ebaff6e78e8bf895bfc308968e139397a9ba65ad7ea5f8d99aac6765c8f35"} Jan 28 13:08:37 crc kubenswrapper[4848]: I0128 13:08:37.238324 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc","Type":"ContainerStarted","Data":"95856bb5d7bbfa319dd257381686a877ed26ede5e2557f68011e54760b5808a5"} Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.268148 4848 generic.go:334] "Generic (PLEG): container finished" podID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerID="e37496aa995c774aa528b58d0551e096b7b42b5a7f81dfe53a540ad71e104029" exitCode=0 Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.268227 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-988d7f849-l8xdb" event={"ID":"6964a7ca-3376-4df2-8a5d-bb63e731b0a8","Type":"ContainerDied","Data":"e37496aa995c774aa528b58d0551e096b7b42b5a7f81dfe53a540ad71e104029"} Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.278174 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6562fac3-e8b3-409d-b81c-aba6bef140d4","Type":"ContainerStarted","Data":"126185a13636e2d0ca9391e7cf781af2b1b7382ce765c4cd4c74772ff11e8d66"} Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.301502 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerStarted","Data":"648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded"} Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.301998 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.307398 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.348951 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.992076394 podStartE2EDuration="9.348905736s" podCreationTimestamp="2026-01-28 13:08:29 +0000 UTC" firstStartedPulling="2026-01-28 13:08:30.837155877 +0000 UTC m=+1337.749372925" lastFinishedPulling="2026-01-28 13:08:37.193985219 +0000 UTC m=+1344.106202267" observedRunningTime="2026-01-28 13:08:38.332784801 +0000 UTC m=+1345.245001849" watchObservedRunningTime="2026-01-28 13:08:38.348905736 +0000 UTC m=+1345.261122764" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.402498 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-combined-ca-bundle\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.402573 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-httpd-config\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.402630 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-internal-tls-certs\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.402799 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-557jq\" (UniqueName: \"kubernetes.io/projected/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-kube-api-access-557jq\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.402901 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-ovndb-tls-certs\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.402935 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-config\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.403031 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-public-tls-certs\") pod \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\" (UID: \"6964a7ca-3376-4df2-8a5d-bb63e731b0a8\") " Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.432547 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.458733 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-kube-api-access-557jq" (OuterVolumeSpecName: "kube-api-access-557jq") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "kube-api-access-557jq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.506947 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-557jq\" (UniqueName: \"kubernetes.io/projected/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-kube-api-access-557jq\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.507434 4848 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.511926 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.529946 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-config" (OuterVolumeSpecName: "config") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.558179 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.590514 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.601394 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6964a7ca-3376-4df2-8a5d-bb63e731b0a8" (UID: "6964a7ca-3376-4df2-8a5d-bb63e731b0a8"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.611472 4848 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.611519 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.611533 4848 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.611546 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.611557 4848 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6964a7ca-3376-4df2-8a5d-bb63e731b0a8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.803105 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-68f5655b9d-76qsp" Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.966007 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-57844b64c8-6jpl8"] Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.966995 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon-log" containerID="cri-o://f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a" gracePeriod=30 Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.967580 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" containerID="cri-o://efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10" gracePeriod=30 Jan 28 13:08:38 crc kubenswrapper[4848]: I0128 13:08:38.976421 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.314187 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc","Type":"ContainerStarted","Data":"b5f8610f4564ec14c03df821d40a2326cec0eab73349364dd920936273f075a1"} Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.317962 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6562fac3-e8b3-409d-b81c-aba6bef140d4","Type":"ContainerStarted","Data":"40b2809b9b20bc5aa0aec621b2cc722241b819605a3fe3834599709d35f7df98"} Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.324410 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-988d7f849-l8xdb" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.325668 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-988d7f849-l8xdb" event={"ID":"6964a7ca-3376-4df2-8a5d-bb63e731b0a8","Type":"ContainerDied","Data":"0c0061a76299f6f5c0503c3dfab37089a8ad40dd2464d443216a9249941a2781"} Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.325762 4848 scope.go:117] "RemoveContainer" containerID="9398e20b03b8ae080f593bd5259c6dc97990067a5daec6123799379e953cdc10" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.351722 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.35168827 podStartE2EDuration="4.35168827s" podCreationTimestamp="2026-01-28 13:08:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:39.335591816 +0000 UTC m=+1346.247808854" watchObservedRunningTime="2026-01-28 13:08:39.35168827 +0000 UTC m=+1346.263905308" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.381347 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-988d7f849-l8xdb"] Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.406804 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-988d7f849-l8xdb"] Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.468793 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.478699 4848 scope.go:117] "RemoveContainer" containerID="e37496aa995c774aa528b58d0551e096b7b42b5a7f81dfe53a540ad71e104029" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.549218 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.580442 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-568df974c9-vxghc" Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.656640 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64b6d8556f-wwmfc"] Jan 28 13:08:39 crc kubenswrapper[4848]: I0128 13:08:39.657402 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" containerName="dnsmasq-dns" containerID="cri-o://343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9" gracePeriod=10 Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.251459 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.369647 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc","Type":"ContainerStarted","Data":"4e8c6fe2e074bebca1e2d365486caf9924f0b0121bb4b8bed8fc014f8ccb606b"} Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.379151 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-nb\") pod \"14c72ce0-d825-43de-90d0-42dc10f55471\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.379274 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-swift-storage-0\") pod \"14c72ce0-d825-43de-90d0-42dc10f55471\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.379423 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-sb\") pod \"14c72ce0-d825-43de-90d0-42dc10f55471\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.379530 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-config\") pod \"14c72ce0-d825-43de-90d0-42dc10f55471\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.379607 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2w92\" (UniqueName: \"kubernetes.io/projected/14c72ce0-d825-43de-90d0-42dc10f55471-kube-api-access-v2w92\") pod \"14c72ce0-d825-43de-90d0-42dc10f55471\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.379751 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-svc\") pod \"14c72ce0-d825-43de-90d0-42dc10f55471\" (UID: \"14c72ce0-d825-43de-90d0-42dc10f55471\") " Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.403871 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14c72ce0-d825-43de-90d0-42dc10f55471-kube-api-access-v2w92" (OuterVolumeSpecName: "kube-api-access-v2w92") pod "14c72ce0-d825-43de-90d0-42dc10f55471" (UID: "14c72ce0-d825-43de-90d0-42dc10f55471"). InnerVolumeSpecName "kube-api-access-v2w92". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.407903 4848 generic.go:334] "Generic (PLEG): container finished" podID="14c72ce0-d825-43de-90d0-42dc10f55471" containerID="343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9" exitCode=0 Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.407990 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" event={"ID":"14c72ce0-d825-43de-90d0-42dc10f55471","Type":"ContainerDied","Data":"343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9"} Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.408029 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" event={"ID":"14c72ce0-d825-43de-90d0-42dc10f55471","Type":"ContainerDied","Data":"a80d2557e141bb8f4623507dff3fad81c40bd0a0ab1562521c2be73a44458fad"} Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.408052 4848 scope.go:117] "RemoveContainer" containerID="343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.408172 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64b6d8556f-wwmfc" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.421357 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="cinder-scheduler" containerID="cri-o://6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08" gracePeriod=30 Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.421561 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="probe" containerID="cri-o://35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051" gracePeriod=30 Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.428814 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.428792398 podStartE2EDuration="5.428792398s" podCreationTimestamp="2026-01-28 13:08:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:40.404128577 +0000 UTC m=+1347.316345635" watchObservedRunningTime="2026-01-28 13:08:40.428792398 +0000 UTC m=+1347.341009426" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.474219 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "14c72ce0-d825-43de-90d0-42dc10f55471" (UID: "14c72ce0-d825-43de-90d0-42dc10f55471"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.476964 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "14c72ce0-d825-43de-90d0-42dc10f55471" (UID: "14c72ce0-d825-43de-90d0-42dc10f55471"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.483204 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2w92\" (UniqueName: \"kubernetes.io/projected/14c72ce0-d825-43de-90d0-42dc10f55471-kube-api-access-v2w92\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.483313 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.483325 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.495381 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "14c72ce0-d825-43de-90d0-42dc10f55471" (UID: "14c72ce0-d825-43de-90d0-42dc10f55471"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.509103 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "14c72ce0-d825-43de-90d0-42dc10f55471" (UID: "14c72ce0-d825-43de-90d0-42dc10f55471"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.537648 4848 scope.go:117] "RemoveContainer" containerID="c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.541850 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-config" (OuterVolumeSpecName: "config") pod "14c72ce0-d825-43de-90d0-42dc10f55471" (UID: "14c72ce0-d825-43de-90d0-42dc10f55471"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.574220 4848 scope.go:117] "RemoveContainer" containerID="343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9" Jan 28 13:08:40 crc kubenswrapper[4848]: E0128 13:08:40.578528 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9\": container with ID starting with 343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9 not found: ID does not exist" containerID="343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.578645 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9"} err="failed to get container status \"343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9\": rpc error: code = NotFound desc = could not find container \"343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9\": container with ID starting with 343f5bb9e9310a3f56a3743cf3a656f031d699e58768c08690ac71a2eef9f4f9 not found: ID does not exist" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.578689 4848 scope.go:117] "RemoveContainer" containerID="c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599" Jan 28 13:08:40 crc kubenswrapper[4848]: E0128 13:08:40.579656 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599\": container with ID starting with c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599 not found: ID does not exist" containerID="c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.579808 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599"} err="failed to get container status \"c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599\": rpc error: code = NotFound desc = could not find container \"c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599\": container with ID starting with c422a23827d459cd79826273ca14bc9e527fc1e3e19e71f86ab5140d82abf599 not found: ID does not exist" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.587396 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.587638 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.587718 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14c72ce0-d825-43de-90d0-42dc10f55471-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.754117 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64b6d8556f-wwmfc"] Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.765166 4848 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64b6d8556f-wwmfc"] Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.850777 4848 scope.go:117] "RemoveContainer" containerID="3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.871707 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" path="/var/lib/kubelet/pods/14c72ce0-d825-43de-90d0-42dc10f55471/volumes" Jan 28 13:08:40 crc kubenswrapper[4848]: I0128 13:08:40.872396 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" path="/var/lib/kubelet/pods/6964a7ca-3376-4df2-8a5d-bb63e731b0a8/volumes" Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.100120 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:52606->10.217.0.162:8443: read: connection reset by peer" Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.436779 4848 generic.go:334] "Generic (PLEG): container finished" podID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerID="35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051" exitCode=0 Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.436828 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ffe5c892-4aa9-4e59-9b91-84f8728ece0c","Type":"ContainerDied","Data":"35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051"} Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.439923 4848 generic.go:334] "Generic (PLEG): container finished" podID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerID="efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10" exitCode=0 Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.439980 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57844b64c8-6jpl8" event={"ID":"6b5c0550-a7fd-430e-991f-9eccf00522e2","Type":"ContainerDied","Data":"efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10"} Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.443834 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerStarted","Data":"ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1"} Jan 28 13:08:41 crc kubenswrapper[4848]: I0128 13:08:41.543472 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.262004 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.347128 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data\") pod \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.347302 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-etc-machine-id\") pod \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.347417 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-combined-ca-bundle\") pod \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.347521 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data-custom\") pod \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.347569 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-scripts\") pod \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.347642 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-655fz\" (UniqueName: \"kubernetes.io/projected/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-kube-api-access-655fz\") pod \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\" (UID: \"ffe5c892-4aa9-4e59-9b91-84f8728ece0c\") " Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.349412 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ffe5c892-4aa9-4e59-9b91-84f8728ece0c" (UID: "ffe5c892-4aa9-4e59-9b91-84f8728ece0c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.360090 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ffe5c892-4aa9-4e59-9b91-84f8728ece0c" (UID: "ffe5c892-4aa9-4e59-9b91-84f8728ece0c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.360583 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-kube-api-access-655fz" (OuterVolumeSpecName: "kube-api-access-655fz") pod "ffe5c892-4aa9-4e59-9b91-84f8728ece0c" (UID: "ffe5c892-4aa9-4e59-9b91-84f8728ece0c"). InnerVolumeSpecName "kube-api-access-655fz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.364482 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-scripts" (OuterVolumeSpecName: "scripts") pod "ffe5c892-4aa9-4e59-9b91-84f8728ece0c" (UID: "ffe5c892-4aa9-4e59-9b91-84f8728ece0c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.487585 4848 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.487642 4848 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.487659 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.487679 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-655fz\" (UniqueName: \"kubernetes.io/projected/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-kube-api-access-655fz\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.516904 4848 generic.go:334] "Generic (PLEG): container finished" podID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerID="6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08" exitCode=0 Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.516974 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ffe5c892-4aa9-4e59-9b91-84f8728ece0c","Type":"ContainerDied","Data":"6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08"} Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.517018 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ffe5c892-4aa9-4e59-9b91-84f8728ece0c","Type":"ContainerDied","Data":"cbb6cc0eb6a62767af98c5c6c2334f2fb336c6c2b70a7b01c2e5e9cb9c931dd0"} Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.517046 4848 scope.go:117] "RemoveContainer" containerID="35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.517094 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.550420 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffe5c892-4aa9-4e59-9b91-84f8728ece0c" (UID: "ffe5c892-4aa9-4e59-9b91-84f8728ece0c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.568505 4848 scope.go:117] "RemoveContainer" containerID="6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.592358 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.630504 4848 scope.go:117] "RemoveContainer" containerID="35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.631678 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051\": container with ID starting with 35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051 not found: ID does not exist" containerID="35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.631728 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051"} err="failed to get container status \"35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051\": rpc error: code = NotFound desc = could not find container \"35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051\": container with ID starting with 35057e9ae86392f7171c7c5a71afa6748ce5b22af9525a20e246113379a1e051 not found: ID does not exist" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.631757 4848 scope.go:117] "RemoveContainer" containerID="6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.633401 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data" (OuterVolumeSpecName: "config-data") pod "ffe5c892-4aa9-4e59-9b91-84f8728ece0c" (UID: "ffe5c892-4aa9-4e59-9b91-84f8728ece0c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.635636 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08\": container with ID starting with 6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08 not found: ID does not exist" containerID="6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.635700 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08"} err="failed to get container status \"6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08\": rpc error: code = NotFound desc = could not find container \"6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08\": container with ID starting with 6dc2871b164af260fa24c8a2fce333aef3a22b8a4831948d74f48f70edd93b08 not found: ID does not exist" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.694660 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5c892-4aa9-4e59-9b91-84f8728ece0c-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.874524 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.890916 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.915184 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.924052 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" containerName="dnsmasq-dns" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924101 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" containerName="dnsmasq-dns" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.924125 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="probe" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924137 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="probe" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.924173 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-api" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924182 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-api" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.924205 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-httpd" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924214 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-httpd" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.924229 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" 
containerName="init" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924237 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" containerName="init" Jan 28 13:08:42 crc kubenswrapper[4848]: E0128 13:08:42.924281 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="cinder-scheduler" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924292 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="cinder-scheduler" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924598 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="probe" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924613 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-httpd" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924624 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="14c72ce0-d825-43de-90d0-42dc10f55471" containerName="dnsmasq-dns" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924638 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6964a7ca-3376-4df2-8a5d-bb63e731b0a8" containerName="neutron-api" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.924773 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" containerName="cinder-scheduler" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.926070 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.930084 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 28 13:08:42 crc kubenswrapper[4848]: I0128 13:08:42.933415 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.004634 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-config-data\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.004731 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04fa376f-7bc1-48da-870a-e8bb086f0263-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.004785 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkssx\" (UniqueName: \"kubernetes.io/projected/04fa376f-7bc1-48da-870a-e8bb086f0263-kube-api-access-qkssx\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.004812 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-scripts\") pod \"cinder-scheduler-0\" 
(UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.004840 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.004904 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107211 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkssx\" (UniqueName: \"kubernetes.io/projected/04fa376f-7bc1-48da-870a-e8bb086f0263-kube-api-access-qkssx\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107302 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-scripts\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107336 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107423 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107490 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-config-data\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107566 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04fa376f-7bc1-48da-870a-e8bb086f0263-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.107678 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04fa376f-7bc1-48da-870a-e8bb086f0263-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.114458 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.114604 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.115023 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-scripts\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.120392 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04fa376f-7bc1-48da-870a-e8bb086f0263-config-data\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.135194 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkssx\" (UniqueName: \"kubernetes.io/projected/04fa376f-7bc1-48da-870a-e8bb086f0263-kube-api-access-qkssx\") pod \"cinder-scheduler-0\" (UID: \"04fa376f-7bc1-48da-870a-e8bb086f0263\") " pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.255872 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 13:08:43 crc kubenswrapper[4848]: I0128 13:08:43.848235 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 13:08:44 crc kubenswrapper[4848]: I0128 13:08:44.088795 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 28 13:08:44 crc kubenswrapper[4848]: I0128 13:08:44.203200 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-648cdddfd-q5sbd" Jan 28 13:08:44 crc kubenswrapper[4848]: I0128 13:08:44.417879 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-648cdddfd-q5sbd" Jan 28 13:08:44 crc kubenswrapper[4848]: I0128 13:08:44.579089 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"04fa376f-7bc1-48da-870a-e8bb086f0263","Type":"ContainerStarted","Data":"81af90e66d863ef3d3901961cbbf991e70263289adbfce290d59c90130ebcec1"} Jan 28 13:08:44 crc kubenswrapper[4848]: I0128 13:08:44.888574 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffe5c892-4aa9-4e59-9b91-84f8728ece0c" path="/var/lib/kubelet/pods/ffe5c892-4aa9-4e59-9b91-84f8728ece0c/volumes" Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.592806 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"04fa376f-7bc1-48da-870a-e8bb086f0263","Type":"ContainerStarted","Data":"594d1ccb62f3e3b324912910fdeb2fdc6ca5f20036480576b78b0302a144f1b9"} Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.593174 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"04fa376f-7bc1-48da-870a-e8bb086f0263","Type":"ContainerStarted","Data":"0504071db797a857db9081c5ec0df47ff8a42e037a6829f42d23f6c6e887ee39"} Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.615747 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.61571932 podStartE2EDuration="3.61571932s" podCreationTimestamp="2026-01-28 13:08:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:08:45.613014726 +0000 UTC m=+1352.525231764" watchObservedRunningTime="2026-01-28 13:08:45.61571932 +0000 UTC m=+1352.527936348" Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.628144 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.771567 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.771649 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.813010 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 13:08:45 crc kubenswrapper[4848]: I0128 13:08:45.820877 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.107284 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/glance-default-internal-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.107784 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.162132 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.179183 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.209130 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-669cc887b-rnh7b" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.303928 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-d454d7fbb-hth9j" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.334569 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-67d5988776-4bwdg"] Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.334979 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" containerID="cri-o://10178059e9ce1c184d63e0ae5277959b9cc872dfd978718ee0ad2b7e59233887" gracePeriod=30 Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.335180 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api" containerID="cri-o://90cad634593a189984f808c2b97d3b77e7b25ecaf5e61f2c0591d941514cea8d" gracePeriod=30 Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.613655 4848 generic.go:334] "Generic (PLEG): container finished" podID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerID="10178059e9ce1c184d63e0ae5277959b9cc872dfd978718ee0ad2b7e59233887" exitCode=143 Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.613749 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67d5988776-4bwdg" event={"ID":"5ac34887-0b39-4669-a00c-c40b4a5f5c1a","Type":"ContainerDied","Data":"10178059e9ce1c184d63e0ae5277959b9cc872dfd978718ee0ad2b7e59233887"} Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.616280 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.616333 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.616349 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:46 crc kubenswrapper[4848]: I0128 13:08:46.616363 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.256660 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.638869 4848 generic.go:334] "Generic (PLEG): container finished" podID="88151fad-4442-4d32-a675-f89f070ed086" containerID="ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1" 
exitCode=1 Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.638966 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerDied","Data":"ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1"} Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.639770 4848 scope.go:117] "RemoveContainer" containerID="3e40610fb6105a3e7609422162fe2823d3412ba91eac26dc9908de2ad373294c" Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.639895 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.639966 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:08:48 crc kubenswrapper[4848]: I0128 13:08:48.640987 4848 scope.go:117] "RemoveContainer" containerID="ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1" Jan 28 13:08:48 crc kubenswrapper[4848]: E0128 13:08:48.641360 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(88151fad-4442-4d32-a675-f89f070ed086)\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.315785 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.316714 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.316798 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.316898 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.462243 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": read tcp 10.217.0.2:50856->10.217.0.173:9311: read: connection reset by peer" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.463599 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-67d5988776-4bwdg" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": read tcp 10.217.0.2:50864->10.217.0.173:9311: read: connection reset by peer" Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.655469 4848 scope.go:117] "RemoveContainer" containerID="ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1" Jan 28 13:08:49 crc kubenswrapper[4848]: E0128 13:08:49.655860 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(88151fad-4442-4d32-a675-f89f070ed086)\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 
13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.657623 4848 generic.go:334] "Generic (PLEG): container finished" podID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerID="90cad634593a189984f808c2b97d3b77e7b25ecaf5e61f2c0591d941514cea8d" exitCode=0 Jan 28 13:08:49 crc kubenswrapper[4848]: I0128 13:08:49.657689 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67d5988776-4bwdg" event={"ID":"5ac34887-0b39-4669-a00c-c40b4a5f5c1a","Type":"ContainerDied","Data":"90cad634593a189984f808c2b97d3b77e7b25ecaf5e61f2c0591d941514cea8d"} Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.015624 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-67d5988776-4bwdg" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.110842 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-logs\") pod \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.111018 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data\") pod \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.111158 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data-custom\") pod \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.111218 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-combined-ca-bundle\") pod \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.111396 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tvps\" (UniqueName: \"kubernetes.io/projected/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-kube-api-access-6tvps\") pod \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\" (UID: \"5ac34887-0b39-4669-a00c-c40b4a5f5c1a\") " Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.113227 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-logs" (OuterVolumeSpecName: "logs") pod "5ac34887-0b39-4669-a00c-c40b4a5f5c1a" (UID: "5ac34887-0b39-4669-a00c-c40b4a5f5c1a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.120536 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-kube-api-access-6tvps" (OuterVolumeSpecName: "kube-api-access-6tvps") pod "5ac34887-0b39-4669-a00c-c40b4a5f5c1a" (UID: "5ac34887-0b39-4669-a00c-c40b4a5f5c1a"). InnerVolumeSpecName "kube-api-access-6tvps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.131177 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5ac34887-0b39-4669-a00c-c40b4a5f5c1a" (UID: "5ac34887-0b39-4669-a00c-c40b4a5f5c1a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.182739 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data" (OuterVolumeSpecName: "config-data") pod "5ac34887-0b39-4669-a00c-c40b4a5f5c1a" (UID: "5ac34887-0b39-4669-a00c-c40b4a5f5c1a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.185946 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.187075 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ac34887-0b39-4669-a00c-c40b4a5f5c1a" (UID: "5ac34887-0b39-4669-a00c-c40b4a5f5c1a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:08:50 crc kubenswrapper[4848]: E0128 13:08:50.189501 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.189538 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api" Jan 28 13:08:50 crc kubenswrapper[4848]: E0128 13:08:50.189594 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.189600 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.189783 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.189799 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" containerName="barbican-api-log" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.190704 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.193818 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.194165 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-l7q6b" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.195045 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.228116 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tvps\" (UniqueName: \"kubernetes.io/projected/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-kube-api-access-6tvps\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.228177 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.228188 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.228199 4848 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.228209 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac34887-0b39-4669-a00c-c40b4a5f5c1a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.240504 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.330404 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/841fc796-225e-424f-bd6c-d3d43c9814d4-openstack-config\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.330496 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlsh2\" (UniqueName: \"kubernetes.io/projected/841fc796-225e-424f-bd6c-d3d43c9814d4-kube-api-access-wlsh2\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.330531 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/841fc796-225e-424f-bd6c-d3d43c9814d4-openstack-config-secret\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.330643 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/841fc796-225e-424f-bd6c-d3d43c9814d4-combined-ca-bundle\") pod \"openstackclient\" 
(UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.358385 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.358749 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.362783 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.432759 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlsh2\" (UniqueName: \"kubernetes.io/projected/841fc796-225e-424f-bd6c-d3d43c9814d4-kube-api-access-wlsh2\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.432821 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/841fc796-225e-424f-bd6c-d3d43c9814d4-openstack-config-secret\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.432886 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/841fc796-225e-424f-bd6c-d3d43c9814d4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.433036 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/841fc796-225e-424f-bd6c-d3d43c9814d4-openstack-config\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.434210 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/841fc796-225e-424f-bd6c-d3d43c9814d4-openstack-config\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.448174 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/841fc796-225e-424f-bd6c-d3d43c9814d4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.451692 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/841fc796-225e-424f-bd6c-d3d43c9814d4-openstack-config-secret\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.460554 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlsh2\" (UniqueName: \"kubernetes.io/projected/841fc796-225e-424f-bd6c-d3d43c9814d4-kube-api-access-wlsh2\") pod \"openstackclient\" (UID: \"841fc796-225e-424f-bd6c-d3d43c9814d4\") " pod="openstack/openstackclient" Jan 28 13:08:50 
crc kubenswrapper[4848]: I0128 13:08:50.550175 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.550363 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.560454 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.577637 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.741084 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-67d5988776-4bwdg" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.741767 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67d5988776-4bwdg" event={"ID":"5ac34887-0b39-4669-a00c-c40b4a5f5c1a","Type":"ContainerDied","Data":"f2edef2fa3b925c6eed27417773a3e3a1830087f351cf69d7c53aaf5a89a05e2"} Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.741823 4848 scope.go:117] "RemoveContainer" containerID="90cad634593a189984f808c2b97d3b77e7b25ecaf5e61f2c0591d941514cea8d" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.797722 4848 scope.go:117] "RemoveContainer" containerID="10178059e9ce1c184d63e0ae5277959b9cc872dfd978718ee0ad2b7e59233887" Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.834340 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-67d5988776-4bwdg"] Jan 28 13:08:50 crc kubenswrapper[4848]: I0128 13:08:50.872852 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-67d5988776-4bwdg"] Jan 28 13:08:51 crc kubenswrapper[4848]: I0128 13:08:51.149009 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 13:08:51 crc kubenswrapper[4848]: I0128 13:08:51.542527 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Jan 28 13:08:51 crc kubenswrapper[4848]: I0128 13:08:51.754841 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"841fc796-225e-424f-bd6c-d3d43c9814d4","Type":"ContainerStarted","Data":"3a43adba38401b6612abb1767fcf643b3448ceead4cde1eb06372c4c5cd8c544"} Jan 28 13:08:52 crc kubenswrapper[4848]: I0128 13:08:52.863647 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ac34887-0b39-4669-a00c-c40b4a5f5c1a" path="/var/lib/kubelet/pods/5ac34887-0b39-4669-a00c-c40b4a5f5c1a/volumes" Jan 28 13:08:53 crc kubenswrapper[4848]: I0128 13:08:53.430422 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.384982 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-77cbfc9c5c-vjds6"] Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.388064 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.392763 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.393048 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.393075 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.410008 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-77cbfc9c5c-vjds6"] Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.475625 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c80f3cf-4e08-4748-95eb-400461e61399-log-httpd\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.475717 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-internal-tls-certs\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.475970 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8c80f3cf-4e08-4748-95eb-400461e61399-etc-swift\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.476398 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-combined-ca-bundle\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.476713 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-public-tls-certs\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.476775 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-config-data\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.477112 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c80f3cf-4e08-4748-95eb-400461e61399-run-httpd\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " 
pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.477146 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwchq\" (UniqueName: \"kubernetes.io/projected/8c80f3cf-4e08-4748-95eb-400461e61399-kube-api-access-wwchq\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579564 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwchq\" (UniqueName: \"kubernetes.io/projected/8c80f3cf-4e08-4748-95eb-400461e61399-kube-api-access-wwchq\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579621 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c80f3cf-4e08-4748-95eb-400461e61399-run-httpd\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579665 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c80f3cf-4e08-4748-95eb-400461e61399-log-httpd\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579730 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-internal-tls-certs\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579762 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8c80f3cf-4e08-4748-95eb-400461e61399-etc-swift\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579805 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-combined-ca-bundle\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579850 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-public-tls-certs\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.579869 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-config-data\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " 
pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.580916 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c80f3cf-4e08-4748-95eb-400461e61399-run-httpd\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.580929 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c80f3cf-4e08-4748-95eb-400461e61399-log-httpd\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.588232 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-internal-tls-certs\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.588782 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8c80f3cf-4e08-4748-95eb-400461e61399-etc-swift\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.589220 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-config-data\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.599706 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwchq\" (UniqueName: \"kubernetes.io/projected/8c80f3cf-4e08-4748-95eb-400461e61399-kube-api-access-wwchq\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.600167 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-public-tls-certs\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.601165 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c80f3cf-4e08-4748-95eb-400461e61399-combined-ca-bundle\") pod \"swift-proxy-77cbfc9c5c-vjds6\" (UID: \"8c80f3cf-4e08-4748-95eb-400461e61399\") " pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:55 crc kubenswrapper[4848]: I0128 13:08:55.718699 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.077537 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.078303 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-central-agent" containerID="cri-o://eae12cf9ef61d4111d9f62ec6e8852eb5190411ee72d701659b19bb36c5db262" gracePeriod=30 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.078375 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="proxy-httpd" containerID="cri-o://648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded" gracePeriod=30 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.078464 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="sg-core" containerID="cri-o://63d641aae9e0d10bd80aab7cc6d022a716db58cb6ff267e3897c628dc38106d5" gracePeriod=30 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.078506 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-notification-agent" containerID="cri-o://86eb3e747fc93c33d00d8e4d0356f1e4fa0c3ff7153d583760cd8d107f60fa53" gracePeriod=30 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.100003 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 28 13:08:56 crc kubenswrapper[4848]: E0128 13:08:56.407757 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2907bca_1e85_4d6e_a716_a495b7affeaa.slice/crio-648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2907bca_1e85_4d6e_a716_a495b7affeaa.slice/crio-conmon-648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.834009 4848 generic.go:334] "Generic (PLEG): container finished" podID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerID="648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded" exitCode=0 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.834049 4848 generic.go:334] "Generic (PLEG): container finished" podID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerID="63d641aae9e0d10bd80aab7cc6d022a716db58cb6ff267e3897c628dc38106d5" exitCode=2 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.834056 4848 generic.go:334] "Generic (PLEG): container finished" podID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerID="eae12cf9ef61d4111d9f62ec6e8852eb5190411ee72d701659b19bb36c5db262" exitCode=0 Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.834095 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerDied","Data":"648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded"} Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.834167 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerDied","Data":"63d641aae9e0d10bd80aab7cc6d022a716db58cb6ff267e3897c628dc38106d5"} Jan 28 13:08:56 crc kubenswrapper[4848]: I0128 13:08:56.834178 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerDied","Data":"eae12cf9ef61d4111d9f62ec6e8852eb5190411ee72d701659b19bb36c5db262"} Jan 28 13:08:59 crc kubenswrapper[4848]: I0128 13:08:59.031620 4848 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod4d9e697a-2a27-44ad-b426-2cb40cddadea"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod4d9e697a-2a27-44ad-b426-2cb40cddadea] : Timed out while waiting for systemd to remove kubepods-besteffort-pod4d9e697a_2a27_44ad_b426_2cb40cddadea.slice" Jan 28 13:08:59 crc kubenswrapper[4848]: I0128 13:08:59.679941 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.181:3000/\": dial tcp 10.217.0.181:3000: connect: connection refused" Jan 28 13:08:59 crc kubenswrapper[4848]: I0128 13:08:59.877297 4848 generic.go:334] "Generic (PLEG): container finished" podID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerID="86eb3e747fc93c33d00d8e4d0356f1e4fa0c3ff7153d583760cd8d107f60fa53" exitCode=0 Jan 28 13:08:59 crc kubenswrapper[4848]: I0128 13:08:59.877376 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerDied","Data":"86eb3e747fc93c33d00d8e4d0356f1e4fa0c3ff7153d583760cd8d107f60fa53"} Jan 28 13:09:00 crc kubenswrapper[4848]: I0128 13:09:00.402673 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6997cd7cdf-nf254" Jan 28 13:09:00 crc kubenswrapper[4848]: I0128 13:09:00.474820 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5b675789b4-dl5kz"] Jan 28 13:09:00 crc kubenswrapper[4848]: I0128 13:09:00.475601 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5b675789b4-dl5kz" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-api" containerID="cri-o://2d2ecde7461e221c7d8d3a2b1caaef35914587f90bd06e2ccad8edacc29c6cac" gracePeriod=30 Jan 28 13:09:00 crc kubenswrapper[4848]: I0128 13:09:00.475928 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5b675789b4-dl5kz" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-httpd" containerID="cri-o://32e6d18168dff15a69d817ec29d8f37df1abadb68fe0ad9ba23143e4921a13ed" gracePeriod=30 Jan 28 13:09:00 crc kubenswrapper[4848]: I0128 13:09:00.892844 4848 generic.go:334] "Generic (PLEG): container finished" podID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerID="32e6d18168dff15a69d817ec29d8f37df1abadb68fe0ad9ba23143e4921a13ed" exitCode=0 Jan 28 13:09:00 crc kubenswrapper[4848]: I0128 13:09:00.892890 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b675789b4-dl5kz" 
event={"ID":"016c28ae-9306-4dd5-a68d-d4dd124b0f79","Type":"ContainerDied","Data":"32e6d18168dff15a69d817ec29d8f37df1abadb68fe0ad9ba23143e4921a13ed"} Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.362571 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.417604 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k56gw\" (UniqueName: \"kubernetes.io/projected/a2907bca-1e85-4d6e-a716-a495b7affeaa-kube-api-access-k56gw\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.418539 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.418631 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-sg-core-conf-yaml\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.418731 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-scripts\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.418812 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-run-httpd\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.418885 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-config-data\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.418929 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-log-httpd\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") " Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.421141 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.424588 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). 
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.433928 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-scripts" (OuterVolumeSpecName: "scripts") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.434101 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2907bca-1e85-4d6e-a716-a495b7affeaa-kube-api-access-k56gw" (OuterVolumeSpecName: "kube-api-access-k56gw") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "kube-api-access-k56gw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.460374 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.521032 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.525666 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle\") pod \"a2907bca-1e85-4d6e-a716-a495b7affeaa\" (UID: \"a2907bca-1e85-4d6e-a716-a495b7affeaa\") "
Jan 28 13:09:01 crc kubenswrapper[4848]: W0128 13:09:01.525872 4848 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/a2907bca-1e85-4d6e-a716-a495b7affeaa/volumes/kubernetes.io~secret/combined-ca-bundle
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.525905 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.527827 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.527853 4848 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.527862 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.527874 4848 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.527883 4848 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2907bca-1e85-4d6e-a716-a495b7affeaa-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.527893 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k56gw\" (UniqueName: \"kubernetes.io/projected/a2907bca-1e85-4d6e-a716-a495b7affeaa-kube-api-access-k56gw\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.542670 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-57844b64c8-6jpl8" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused"
Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.565052 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-config-data" (OuterVolumeSpecName: "config-data") pod "a2907bca-1e85-4d6e-a716-a495b7affeaa" (UID: "a2907bca-1e85-4d6e-a716-a495b7affeaa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.629923 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2907bca-1e85-4d6e-a716-a495b7affeaa-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.707786 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-77cbfc9c5c-vjds6"] Jan 28 13:09:01 crc kubenswrapper[4848]: W0128 13:09:01.712408 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c80f3cf_4e08_4748_95eb_400461e61399.slice/crio-bbd315ee852a5ac265b3df7879a1870b4f276c971d408cea2aa3c24fd2d97148 WatchSource:0}: Error finding container bbd315ee852a5ac265b3df7879a1870b4f276c971d408cea2aa3c24fd2d97148: Status 404 returned error can't find the container with id bbd315ee852a5ac265b3df7879a1870b4f276c971d408cea2aa3c24fd2d97148 Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.906881 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" event={"ID":"8c80f3cf-4e08-4748-95eb-400461e61399","Type":"ContainerStarted","Data":"bbd315ee852a5ac265b3df7879a1870b4f276c971d408cea2aa3c24fd2d97148"} Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.910286 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"841fc796-225e-424f-bd6c-d3d43c9814d4","Type":"ContainerStarted","Data":"9a48a57e9a648a1504ae728d622b19113911c6f54ad76bd648d29cf8ded18aed"} Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.915597 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2907bca-1e85-4d6e-a716-a495b7affeaa","Type":"ContainerDied","Data":"546db06f5c37572d1b47a875b4ace2b0abc3fd93a43020a3662e87029c182b9c"} Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.915651 4848 scope.go:117] "RemoveContainer" containerID="648f40b5458b33b8523780b6953c10b708637fd848d7f2f0d6d95453af0e0ded" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.915782 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.943375 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.107569006 podStartE2EDuration="11.943348904s" podCreationTimestamp="2026-01-28 13:08:50 +0000 UTC" firstStartedPulling="2026-01-28 13:08:51.163340693 +0000 UTC m=+1358.075557731" lastFinishedPulling="2026-01-28 13:09:00.999120591 +0000 UTC m=+1367.911337629" observedRunningTime="2026-01-28 13:09:01.933721008 +0000 UTC m=+1368.845938046" watchObservedRunningTime="2026-01-28 13:09:01.943348904 +0000 UTC m=+1368.855565932" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.969143 4848 scope.go:117] "RemoveContainer" containerID="63d641aae9e0d10bd80aab7cc6d022a716db58cb6ff267e3897c628dc38106d5" Jan 28 13:09:01 crc kubenswrapper[4848]: I0128 13:09:01.997952 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.003047 4848 scope.go:117] "RemoveContainer" containerID="86eb3e747fc93c33d00d8e4d0356f1e4fa0c3ff7153d583760cd8d107f60fa53" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.008880 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.040270 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:02 crc kubenswrapper[4848]: E0128 13:09:02.040890 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="sg-core" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.040915 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="sg-core" Jan 28 13:09:02 crc kubenswrapper[4848]: E0128 13:09:02.040943 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="proxy-httpd" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.040951 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="proxy-httpd" Jan 28 13:09:02 crc kubenswrapper[4848]: E0128 13:09:02.040963 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-notification-agent" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.040970 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-notification-agent" Jan 28 13:09:02 crc kubenswrapper[4848]: E0128 13:09:02.040985 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-central-agent" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.040992 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-central-agent" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.041318 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="sg-core" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.041350 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="proxy-httpd" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.041368 4848 
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.041384 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" containerName="ceilometer-central-agent"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.044055 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.044357 4848 scope.go:117] "RemoveContainer" containerID="eae12cf9ef61d4111d9f62ec6e8852eb5190411ee72d701659b19bb36c5db262"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.048015 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.048196 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.076782 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.141945 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhwx4\" (UniqueName: \"kubernetes.io/projected/6d1240ae-2011-41cb-90a2-6f050020e305-kube-api-access-fhwx4\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.142028 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-run-httpd\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.142088 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-config-data\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.142134 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-scripts\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.142159 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.142185 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.142206 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-log-httpd\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.243981 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.244044 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.244070 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-log-httpd\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.244138 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhwx4\" (UniqueName: \"kubernetes.io/projected/6d1240ae-2011-41cb-90a2-6f050020e305-kube-api-access-fhwx4\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.244200 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-run-httpd\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.244288 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-config-data\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.244341 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-scripts\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.245206 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-log-httpd\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.245448 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-run-httpd\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.250436 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0"
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.256737 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-scripts\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.258115 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.258945 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-config-data\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.265869 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhwx4\" (UniqueName: \"kubernetes.io/projected/6d1240ae-2011-41cb-90a2-6f050020e305-kube-api-access-fhwx4\") pod \"ceilometer-0\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " pod="openstack/ceilometer-0" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.371411 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.866695 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2907bca-1e85-4d6e-a716-a495b7affeaa" path="/var/lib/kubelet/pods/a2907bca-1e85-4d6e-a716-a495b7affeaa/volumes" Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.984832 4848 generic.go:334] "Generic (PLEG): container finished" podID="5eebd755-02f3-4d5b-8658-9620128db59c" containerID="7b190a1936b18e6a0594ad7d98ae8d062176ac47c0ab2fbc8c5d65c06ec0c0d9" exitCode=137 Jan 28 13:09:02 crc kubenswrapper[4848]: I0128 13:09:02.986002 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5eebd755-02f3-4d5b-8658-9620128db59c","Type":"ContainerDied","Data":"7b190a1936b18e6a0594ad7d98ae8d062176ac47c0ab2fbc8c5d65c06ec0c0d9"} Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.037542 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" event={"ID":"8c80f3cf-4e08-4748-95eb-400461e61399","Type":"ContainerStarted","Data":"b204c17e83b8360146a68b667383cd93e5f53d6cd1e25ad89f6e3f1d25afe82a"} Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.037648 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" event={"ID":"8c80f3cf-4e08-4748-95eb-400461e61399","Type":"ContainerStarted","Data":"53fddc9f22385fe4d229bd6a117b63edc7158ae8c1253c5e03daefbafb863716"} Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.117556 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" podStartSLOduration=8.117521046 podStartE2EDuration="8.117521046s" podCreationTimestamp="2026-01-28 13:08:55 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:03.080273311 +0000 UTC m=+1369.992490359" watchObservedRunningTime="2026-01-28 13:09:03.117521046 +0000 UTC m=+1370.029738084" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.120745 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.354893 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.442086 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eebd755-02f3-4d5b-8658-9620128db59c-etc-machine-id\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.442401 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5eebd755-02f3-4d5b-8658-9620128db59c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.442782 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vwc6\" (UniqueName: \"kubernetes.io/projected/5eebd755-02f3-4d5b-8658-9620128db59c-kube-api-access-8vwc6\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.444188 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.444506 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-combined-ca-bundle\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.444660 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data-custom\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.446300 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5eebd755-02f3-4d5b-8658-9620128db59c-logs\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.446466 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-scripts\") pod \"5eebd755-02f3-4d5b-8658-9620128db59c\" (UID: \"5eebd755-02f3-4d5b-8658-9620128db59c\") " Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.447454 4848 reconciler_common.go:293] 
"Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eebd755-02f3-4d5b-8658-9620128db59c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.447544 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eebd755-02f3-4d5b-8658-9620128db59c-logs" (OuterVolumeSpecName: "logs") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.457980 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-scripts" (OuterVolumeSpecName: "scripts") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.458285 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eebd755-02f3-4d5b-8658-9620128db59c-kube-api-access-8vwc6" (OuterVolumeSpecName: "kube-api-access-8vwc6") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "kube-api-access-8vwc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.463104 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.494140 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.549830 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vwc6\" (UniqueName: \"kubernetes.io/projected/5eebd755-02f3-4d5b-8658-9620128db59c-kube-api-access-8vwc6\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.549865 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.549874 4848 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.549883 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5eebd755-02f3-4d5b-8658-9620128db59c-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.549893 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.578589 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data" (OuterVolumeSpecName: "config-data") pod "5eebd755-02f3-4d5b-8658-9620128db59c" (UID: "5eebd755-02f3-4d5b-8658-9620128db59c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.651485 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eebd755-02f3-4d5b-8658-9620128db59c-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:03 crc kubenswrapper[4848]: I0128 13:09:03.850632 4848 scope.go:117] "RemoveContainer" containerID="ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1" Jan 28 13:09:03 crc kubenswrapper[4848]: E0128 13:09:03.850969 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(88151fad-4442-4d32-a675-f89f070ed086)\"" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.050861 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5eebd755-02f3-4d5b-8658-9620128db59c","Type":"ContainerDied","Data":"c8e7c85ee66d6e1a1677222adc8e5e4e89a7f0e67b7aa7e51ebb2812f461f15a"} Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.050936 4848 scope.go:117] "RemoveContainer" containerID="7b190a1936b18e6a0594ad7d98ae8d062176ac47c0ab2fbc8c5d65c06ec0c0d9" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.051114 4848 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.067761 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerStarted","Data":"cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac"}
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.067833 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerStarted","Data":"1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2"}
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.067844 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerStarted","Data":"cb1be744ec3f12f54559009d0d95fb359a966eba3665ebf39126ea6afcd18222"}
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.068794 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-77cbfc9c5c-vjds6"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.068847 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-77cbfc9c5c-vjds6"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.145926 4848 scope.go:117] "RemoveContainer" containerID="8009cb77068167102fdbe0b9dcd35b5805bf6f3b97352e51f76e770bac818737"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.155327 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.192320 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.244372 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 28 13:09:04 crc kubenswrapper[4848]: E0128 13:09:04.245087 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api-log"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.245122 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api-log"
Jan 28 13:09:04 crc kubenswrapper[4848]: E0128 13:09:04.245138 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.245149 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.245439 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api-log"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.245490 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" containerName="cinder-api"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.246898 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.260224 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.260405 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.260511 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.277670 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.378640 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a69cc57-5cf8-4b44-a956-5641d66512fa-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.378700 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-scripts\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.378835 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.378860 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-config-data\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.378943 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5f4c\" (UniqueName: \"kubernetes.io/projected/0a69cc57-5cf8-4b44-a956-5641d66512fa-kube-api-access-g5f4c\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.379064 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.379206 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-public-tls-certs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.379278 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a69cc57-5cf8-4b44-a956-5641d66512fa-logs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0"
\"kubernetes.io/empty-dir/0a69cc57-5cf8-4b44-a956-5641d66512fa-logs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.379330 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-config-data-custom\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482391 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-config-data-custom\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482489 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a69cc57-5cf8-4b44-a956-5641d66512fa-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-scripts\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482548 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482564 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-config-data\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482597 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5f4c\" (UniqueName: \"kubernetes.io/projected/0a69cc57-5cf8-4b44-a956-5641d66512fa-kube-api-access-g5f4c\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482635 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482679 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-public-tls-certs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.482697 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a69cc57-5cf8-4b44-a956-5641d66512fa-logs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.483486 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a69cc57-5cf8-4b44-a956-5641d66512fa-logs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.487754 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a69cc57-5cf8-4b44-a956-5641d66512fa-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.487831 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-config-data-custom\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.489449 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-scripts\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.490366 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-config-data\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.491055 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.491865 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.510413 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a69cc57-5cf8-4b44-a956-5641d66512fa-public-tls-certs\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.513501 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5f4c\" (UniqueName: \"kubernetes.io/projected/0a69cc57-5cf8-4b44-a956-5641d66512fa-kube-api-access-g5f4c\") pod \"cinder-api-0\" (UID: \"0a69cc57-5cf8-4b44-a956-5641d66512fa\") " pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.610569 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.661945 4848 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 13:09:04 crc kubenswrapper[4848]: I0128 13:09:04.916648 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5eebd755-02f3-4d5b-8658-9620128db59c" path="/var/lib/kubelet/pods/5eebd755-02f3-4d5b-8658-9620128db59c/volumes" Jan 28 13:09:05 crc kubenswrapper[4848]: I0128 13:09:05.118239 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerStarted","Data":"932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531"} Jan 28 13:09:05 crc kubenswrapper[4848]: I0128 13:09:05.217059 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 13:09:06 crc kubenswrapper[4848]: I0128 13:09:06.133904 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0a69cc57-5cf8-4b44-a956-5641d66512fa","Type":"ContainerStarted","Data":"1191cfeba8e7648918dc0aefc5b482ae49f6efad04f465db7ba01370c867fdef"} Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.320195 4848 generic.go:334] "Generic (PLEG): container finished" podID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerID="2d2ecde7461e221c7d8d3a2b1caaef35914587f90bd06e2ccad8edacc29c6cac" exitCode=0 Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.321093 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b675789b4-dl5kz" event={"ID":"016c28ae-9306-4dd5-a68d-d4dd124b0f79","Type":"ContainerDied","Data":"2d2ecde7461e221c7d8d3a2b1caaef35914587f90bd06e2ccad8edacc29c6cac"} Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.346434 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerStarted","Data":"248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d"} Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.346688 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-central-agent" containerID="cri-o://1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2" gracePeriod=30 Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.347095 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.347489 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="proxy-httpd" containerID="cri-o://248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d" gracePeriod=30 Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.347550 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="sg-core" containerID="cri-o://932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531" gracePeriod=30 Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.347587 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-notification-agent" containerID="cri-o://cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac" gracePeriod=30 Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 
13:09:08.365055 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0a69cc57-5cf8-4b44-a956-5641d66512fa","Type":"ContainerStarted","Data":"687db08d6f116ad3b1c45e76cb35599288b4c73aa0234d907618ad23e4c59307"} Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.384782 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.860166788 podStartE2EDuration="6.384759042s" podCreationTimestamp="2026-01-28 13:09:02 +0000 UTC" firstStartedPulling="2026-01-28 13:09:03.059968921 +0000 UTC m=+1369.972185959" lastFinishedPulling="2026-01-28 13:09:07.584561175 +0000 UTC m=+1374.496778213" observedRunningTime="2026-01-28 13:09:08.381094201 +0000 UTC m=+1375.293311239" watchObservedRunningTime="2026-01-28 13:09:08.384759042 +0000 UTC m=+1375.296976080" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.475634 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.596418 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sfzq\" (UniqueName: \"kubernetes.io/projected/016c28ae-9306-4dd5-a68d-d4dd124b0f79-kube-api-access-5sfzq\") pod \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.596543 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-httpd-config\") pod \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.596650 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-combined-ca-bundle\") pod \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.596727 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-config\") pod \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.596829 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-ovndb-tls-certs\") pod \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\" (UID: \"016c28ae-9306-4dd5-a68d-d4dd124b0f79\") " Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.622918 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "016c28ae-9306-4dd5-a68d-d4dd124b0f79" (UID: "016c28ae-9306-4dd5-a68d-d4dd124b0f79"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.643950 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/016c28ae-9306-4dd5-a68d-d4dd124b0f79-kube-api-access-5sfzq" (OuterVolumeSpecName: "kube-api-access-5sfzq") pod "016c28ae-9306-4dd5-a68d-d4dd124b0f79" (UID: "016c28ae-9306-4dd5-a68d-d4dd124b0f79"). InnerVolumeSpecName "kube-api-access-5sfzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.704592 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sfzq\" (UniqueName: \"kubernetes.io/projected/016c28ae-9306-4dd5-a68d-d4dd124b0f79-kube-api-access-5sfzq\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.704640 4848 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.784047 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-config" (OuterVolumeSpecName: "config") pod "016c28ae-9306-4dd5-a68d-d4dd124b0f79" (UID: "016c28ae-9306-4dd5-a68d-d4dd124b0f79"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.797411 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "016c28ae-9306-4dd5-a68d-d4dd124b0f79" (UID: "016c28ae-9306-4dd5-a68d-d4dd124b0f79"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.822120 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.822164 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.828428 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "016c28ae-9306-4dd5-a68d-d4dd124b0f79" (UID: "016c28ae-9306-4dd5-a68d-d4dd124b0f79"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:08 crc kubenswrapper[4848]: I0128 13:09:08.926487 4848 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/016c28ae-9306-4dd5-a68d-d4dd124b0f79-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.377463 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-57844b64c8-6jpl8" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.393364 4848 generic.go:334] "Generic (PLEG): container finished" podID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerID="f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a" exitCode=137 Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.393508 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57844b64c8-6jpl8" event={"ID":"6b5c0550-a7fd-430e-991f-9eccf00522e2","Type":"ContainerDied","Data":"f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a"} Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.393548 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57844b64c8-6jpl8" event={"ID":"6b5c0550-a7fd-430e-991f-9eccf00522e2","Type":"ContainerDied","Data":"5273fc980b21ff2ebc8243726ebd7b41a84d3336af259175b7da40789872771b"} Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.393591 4848 scope.go:117] "RemoveContainer" containerID="efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.408430 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0a69cc57-5cf8-4b44-a956-5641d66512fa","Type":"ContainerStarted","Data":"1934f152259f9015800de4ea5756bae7f0045bc739f00017267daf32139ac88f"} Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.409930 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.425732 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b675789b4-dl5kz" event={"ID":"016c28ae-9306-4dd5-a68d-d4dd124b0f79","Type":"ContainerDied","Data":"41559dcff7a4d10fc946e1decc6473254f15749b7a5c6c530d3847a1dff82cc0"} Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.425912 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5b675789b4-dl5kz" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443231 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-combined-ca-bundle\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443376 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-config-data\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443409 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-tls-certs\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443540 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b5c0550-a7fd-430e-991f-9eccf00522e2-logs\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443656 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-scripts\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443714 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzxbr\" (UniqueName: \"kubernetes.io/projected/6b5c0550-a7fd-430e-991f-9eccf00522e2-kube-api-access-vzxbr\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.443773 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-secret-key\") pod \"6b5c0550-a7fd-430e-991f-9eccf00522e2\" (UID: \"6b5c0550-a7fd-430e-991f-9eccf00522e2\") " Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.459406 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.465735 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b5c0550-a7fd-430e-991f-9eccf00522e2-logs" (OuterVolumeSpecName: "logs") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.467325 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.467299982 podStartE2EDuration="5.467299982s" podCreationTimestamp="2026-01-28 13:09:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:09.446857659 +0000 UTC m=+1376.359074697" watchObservedRunningTime="2026-01-28 13:09:09.467299982 +0000 UTC m=+1376.379517020" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.470074 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b5c0550-a7fd-430e-991f-9eccf00522e2-kube-api-access-vzxbr" (OuterVolumeSpecName: "kube-api-access-vzxbr") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "kube-api-access-vzxbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.493956 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5b675789b4-dl5kz"] Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.514615 4848 generic.go:334] "Generic (PLEG): container finished" podID="6d1240ae-2011-41cb-90a2-6f050020e305" containerID="932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531" exitCode=2 Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.514690 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerDied","Data":"932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531"} Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.529780 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.529891 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-config-data" (OuterVolumeSpecName: "config-data") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.545911 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5b675789b4-dl5kz"] Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.546406 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.546439 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b5c0550-a7fd-430e-991f-9eccf00522e2-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.546450 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzxbr\" (UniqueName: \"kubernetes.io/projected/6b5c0550-a7fd-430e-991f-9eccf00522e2-kube-api-access-vzxbr\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.546462 4848 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.546471 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.550448 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.569463 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-scripts" (OuterVolumeSpecName: "scripts") pod "6b5c0550-a7fd-430e-991f-9eccf00522e2" (UID: "6b5c0550-a7fd-430e-991f-9eccf00522e2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.648352 4848 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b5c0550-a7fd-430e-991f-9eccf00522e2-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.648622 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b5c0550-a7fd-430e-991f-9eccf00522e2-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.650720 4848 scope.go:117] "RemoveContainer" containerID="f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.696281 4848 scope.go:117] "RemoveContainer" containerID="efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10" Jan 28 13:09:09 crc kubenswrapper[4848]: E0128 13:09:09.697120 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10\": container with ID starting with efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10 not found: ID does not exist" containerID="efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.697167 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10"} err="failed to get container status \"efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10\": rpc error: code = NotFound desc = could not find container \"efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10\": container with ID starting with efa525100d82c90bd9030fdf8e4109689e3d19d1ca52de70880e0ff7774cec10 not found: ID does not exist" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.697201 4848 scope.go:117] "RemoveContainer" containerID="f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a" Jan 28 13:09:09 crc kubenswrapper[4848]: E0128 13:09:09.697936 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a\": container with ID starting with f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a not found: ID does not exist" containerID="f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.698016 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a"} err="failed to get container status \"f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a\": rpc error: code = NotFound desc = could not find container \"f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a\": container with ID starting with f3828e97d9bcdb0a7e77d0073f89982170174cbd615d4e309d5c448502a14e7a not found: ID does not exist" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.698087 4848 scope.go:117] "RemoveContainer" containerID="32e6d18168dff15a69d817ec29d8f37df1abadb68fe0ad9ba23143e4921a13ed" Jan 28 13:09:09 crc kubenswrapper[4848]: I0128 13:09:09.723914 4848 scope.go:117] "RemoveContainer" 
containerID="2d2ecde7461e221c7d8d3a2b1caaef35914587f90bd06e2ccad8edacc29c6cac" Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.529654 4848 generic.go:334] "Generic (PLEG): container finished" podID="6d1240ae-2011-41cb-90a2-6f050020e305" containerID="cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac" exitCode=0 Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.529727 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerDied","Data":"cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac"} Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.531295 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57844b64c8-6jpl8" Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.573389 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-57844b64c8-6jpl8"] Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.587986 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-57844b64c8-6jpl8"] Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.730406 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.731157 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-77cbfc9c5c-vjds6" Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.867981 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" path="/var/lib/kubelet/pods/016c28ae-9306-4dd5-a68d-d4dd124b0f79/volumes" Jan 28 13:09:10 crc kubenswrapper[4848]: I0128 13:09:10.869432 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" path="/var/lib/kubelet/pods/6b5c0550-a7fd-430e-991f-9eccf00522e2/volumes" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.041941 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-6g8rf"] Jan 28 13:09:14 crc kubenswrapper[4848]: E0128 13:09:14.044650 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.044671 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" Jan 28 13:09:14 crc kubenswrapper[4848]: E0128 13:09:14.044715 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-httpd" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.044723 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-httpd" Jan 28 13:09:14 crc kubenswrapper[4848]: E0128 13:09:14.044740 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon-log" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.044747 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon-log" Jan 28 13:09:14 crc kubenswrapper[4848]: E0128 13:09:14.044767 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-api" Jan 28 13:09:14 crc 
kubenswrapper[4848]: I0128 13:09:14.044773 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-api" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.044969 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.044980 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-api" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.044992 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="016c28ae-9306-4dd5-a68d-d4dd124b0f79" containerName="neutron-httpd" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.045019 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b5c0550-a7fd-430e-991f-9eccf00522e2" containerName="horizon-log" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.045814 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.055214 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-6g8rf"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.146405 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9a381e2-822b-4682-9b0a-602997cf8a74-operator-scripts\") pod \"nova-api-db-create-6g8rf\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.146501 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brmff\" (UniqueName: \"kubernetes.io/projected/c9a381e2-822b-4682-9b0a-602997cf8a74-kube-api-access-brmff\") pod \"nova-api-db-create-6g8rf\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.150841 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-pkbf5"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.153389 4848 util.go:30] "No sandbox for pod can be found. 
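[editor's note] The cpu_manager/memory_manager RemoveStaleState entries show the kubelet's resource managers pruning per-container state left over from the deleted horizon and neutron pods before admitting the new nova-api-db-create pod. Conceptually this is a map keyed by (podUID, containerName) being swept against the set of live pods; a minimal model of that idea (the data structures are illustrative, not the kubelet's own):

# Illustrative model only -- not the kubelet's actual state store.
cpuset_assignments = {
    ("6b5c0550-a7fd-430e-991f-9eccf00522e2", "horizon"): "0-3",
    ("016c28ae-9306-4dd5-a68d-d4dd124b0f79", "neutron-api"): "4-7",
}

def remove_stale_state(assignments, active_pod_uids):
    # Drop any (podUID, containerName) entry whose pod no longer exists.
    for pod_uid, name in [k for k in assignments if k[0] not in active_pod_uids]:
        print(f'RemoveStaleState: removing container podUID="{pod_uid}" containerName="{name}"')
        del assignments[(pod_uid, name)]

remove_stale_state(cpuset_assignments, active_pod_uids=set())  # prunes both entries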
Need to start a new one" pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.180427 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pkbf5"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.248564 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9a381e2-822b-4682-9b0a-602997cf8a74-operator-scripts\") pod \"nova-api-db-create-6g8rf\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.248647 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brmff\" (UniqueName: \"kubernetes.io/projected/c9a381e2-822b-4682-9b0a-602997cf8a74-kube-api-access-brmff\") pod \"nova-api-db-create-6g8rf\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.248720 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31edbc2f-7790-429e-a6b0-6f87c88ae72f-operator-scripts\") pod \"nova-cell0-db-create-pkbf5\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.248754 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fwld\" (UniqueName: \"kubernetes.io/projected/31edbc2f-7790-429e-a6b0-6f87c88ae72f-kube-api-access-8fwld\") pod \"nova-cell0-db-create-pkbf5\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.249713 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9a381e2-822b-4682-9b0a-602997cf8a74-operator-scripts\") pod \"nova-api-db-create-6g8rf\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.256993 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b3ce-account-create-update-vpck6"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.258493 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.263307 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.272690 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b3ce-account-create-update-vpck6"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.290192 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brmff\" (UniqueName: \"kubernetes.io/projected/c9a381e2-822b-4682-9b0a-602997cf8a74-kube-api-access-brmff\") pod \"nova-api-db-create-6g8rf\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.357414 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9qll\" (UniqueName: \"kubernetes.io/projected/81c457b6-7dbc-41b5-ba65-227b2bc7492f-kube-api-access-l9qll\") pod \"nova-api-b3ce-account-create-update-vpck6\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.357471 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81c457b6-7dbc-41b5-ba65-227b2bc7492f-operator-scripts\") pod \"nova-api-b3ce-account-create-update-vpck6\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.357508 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31edbc2f-7790-429e-a6b0-6f87c88ae72f-operator-scripts\") pod \"nova-cell0-db-create-pkbf5\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.357574 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fwld\" (UniqueName: \"kubernetes.io/projected/31edbc2f-7790-429e-a6b0-6f87c88ae72f-kube-api-access-8fwld\") pod \"nova-cell0-db-create-pkbf5\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.359708 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31edbc2f-7790-429e-a6b0-6f87c88ae72f-operator-scripts\") pod \"nova-cell0-db-create-pkbf5\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.365575 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-xvrs4"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.367320 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.368871 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.377379 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fwld\" (UniqueName: \"kubernetes.io/projected/31edbc2f-7790-429e-a6b0-6f87c88ae72f-kube-api-access-8fwld\") pod \"nova-cell0-db-create-pkbf5\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.395652 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xvrs4"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.459900 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9qll\" (UniqueName: \"kubernetes.io/projected/81c457b6-7dbc-41b5-ba65-227b2bc7492f-kube-api-access-l9qll\") pod \"nova-api-b3ce-account-create-update-vpck6\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.459955 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81c457b6-7dbc-41b5-ba65-227b2bc7492f-operator-scripts\") pod \"nova-api-b3ce-account-create-update-vpck6\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.460050 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-operator-scripts\") pod \"nova-cell1-db-create-xvrs4\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.460087 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjrrj\" (UniqueName: \"kubernetes.io/projected/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-kube-api-access-zjrrj\") pod \"nova-cell1-db-create-xvrs4\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.461475 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81c457b6-7dbc-41b5-ba65-227b2bc7492f-operator-scripts\") pod \"nova-api-b3ce-account-create-update-vpck6\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.473886 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.474131 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-15f5-account-create-update-rd864"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.475730 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.487830 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.493675 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9qll\" (UniqueName: \"kubernetes.io/projected/81c457b6-7dbc-41b5-ba65-227b2bc7492f-kube-api-access-l9qll\") pod \"nova-api-b3ce-account-create-update-vpck6\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.504338 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-15f5-account-create-update-rd864"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.562372 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c11380b3-750f-4d67-89de-5449903ecba9-operator-scripts\") pod \"nova-cell0-15f5-account-create-update-rd864\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.562502 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-operator-scripts\") pod \"nova-cell1-db-create-xvrs4\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.562531 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjrrj\" (UniqueName: \"kubernetes.io/projected/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-kube-api-access-zjrrj\") pod \"nova-cell1-db-create-xvrs4\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.562554 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9tvr\" (UniqueName: \"kubernetes.io/projected/c11380b3-750f-4d67-89de-5449903ecba9-kube-api-access-w9tvr\") pod \"nova-cell0-15f5-account-create-update-rd864\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.572302 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-operator-scripts\") pod \"nova-cell1-db-create-xvrs4\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.592651 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.593399 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjrrj\" (UniqueName: \"kubernetes.io/projected/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-kube-api-access-zjrrj\") pod \"nova-cell1-db-create-xvrs4\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.669004 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.669128 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c11380b3-750f-4d67-89de-5449903ecba9-operator-scripts\") pod \"nova-cell0-15f5-account-create-update-rd864\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.670270 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9tvr\" (UniqueName: \"kubernetes.io/projected/c11380b3-750f-4d67-89de-5449903ecba9-kube-api-access-w9tvr\") pod \"nova-cell0-15f5-account-create-update-rd864\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.670288 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c11380b3-750f-4d67-89de-5449903ecba9-operator-scripts\") pod \"nova-cell0-15f5-account-create-update-rd864\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.671095 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-0dd7-account-create-update-hbr27"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.674675 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.677357 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.747469 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9tvr\" (UniqueName: \"kubernetes.io/projected/c11380b3-750f-4d67-89de-5449903ecba9-kube-api-access-w9tvr\") pod \"nova-cell0-15f5-account-create-update-rd864\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.756882 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0dd7-account-create-update-hbr27"] Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.761935 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.814491 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4863c1ad-76a8-4892-a664-c7deee6ed995-operator-scripts\") pod \"nova-cell1-0dd7-account-create-update-hbr27\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.816161 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9dgp\" (UniqueName: \"kubernetes.io/projected/4863c1ad-76a8-4892-a664-c7deee6ed995-kube-api-access-q9dgp\") pod \"nova-cell1-0dd7-account-create-update-hbr27\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.918743 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4863c1ad-76a8-4892-a664-c7deee6ed995-operator-scripts\") pod \"nova-cell1-0dd7-account-create-update-hbr27\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.918823 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9dgp\" (UniqueName: \"kubernetes.io/projected/4863c1ad-76a8-4892-a664-c7deee6ed995-kube-api-access-q9dgp\") pod \"nova-cell1-0dd7-account-create-update-hbr27\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.920498 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4863c1ad-76a8-4892-a664-c7deee6ed995-operator-scripts\") pod \"nova-cell1-0dd7-account-create-update-hbr27\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:14 crc kubenswrapper[4848]: I0128 13:09:14.953678 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9dgp\" (UniqueName: \"kubernetes.io/projected/4863c1ad-76a8-4892-a664-c7deee6ed995-kube-api-access-q9dgp\") pod \"nova-cell1-0dd7-account-create-update-hbr27\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.103721 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-6g8rf"] Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.115029 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.476984 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pkbf5"] Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.657915 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pkbf5" event={"ID":"31edbc2f-7790-429e-a6b0-6f87c88ae72f","Type":"ContainerStarted","Data":"7946be2e43ed6d10380de494a1ad8d91c91e576cf53a5f3c713fe6a6ea32cc64"} Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.680848 4848 generic.go:334] "Generic (PLEG): container finished" podID="6d1240ae-2011-41cb-90a2-6f050020e305" containerID="1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2" exitCode=0 Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.681414 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerDied","Data":"1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2"} Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.693174 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6g8rf" event={"ID":"c9a381e2-822b-4682-9b0a-602997cf8a74","Type":"ContainerStarted","Data":"ab2f12c914e390b78ab5892d7bfe78a6c4162ace4b2c275483f03a4451745498"} Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.821201 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xvrs4"] Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.874129 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b3ce-account-create-update-vpck6"] Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.906825 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 28 13:09:15 crc kubenswrapper[4848]: I0128 13:09:15.993019 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-15f5-account-create-update-rd864"] Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.035317 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.038210 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0dd7-account-create-update-hbr27"] Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.712310 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pkbf5" event={"ID":"31edbc2f-7790-429e-a6b0-6f87c88ae72f","Type":"ContainerStarted","Data":"cc3ba5f023093e8d8f0907f78f6779698ce152fe455974dd128ebfc9b3f89bd5"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.718984 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xvrs4" event={"ID":"3b57b9b0-bb70-43d2-a97f-7e0372e3971c","Type":"ContainerStarted","Data":"edf32bc74c1e8aafe6e88b62505f05ec787f1cbdd02deaf06a232c7d0927069f"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.719054 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xvrs4" event={"ID":"3b57b9b0-bb70-43d2-a97f-7e0372e3971c","Type":"ContainerStarted","Data":"602dfb9c3e8d353709e24230ac2d08673b0cd101582324e219d1edb28fb04f56"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.722801 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-b3ce-account-create-update-vpck6" event={"ID":"81c457b6-7dbc-41b5-ba65-227b2bc7492f","Type":"ContainerStarted","Data":"4c073da8e3337b45af4f84c2af9f68e9123607f84283718c7e3082044e5ac82b"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.722845 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b3ce-account-create-update-vpck6" event={"ID":"81c457b6-7dbc-41b5-ba65-227b2bc7492f","Type":"ContainerStarted","Data":"c2d32e790d61387ee0db10aa7149cbcea3f2dee58b6b73944c510ce8114cf1e5"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.747117 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6g8rf" event={"ID":"c9a381e2-822b-4682-9b0a-602997cf8a74","Type":"ContainerStarted","Data":"1d08abb1260dd36dc5beab0a1a999d5ed404cf6ad84d269670b995997f3c1848"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.758795 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-pkbf5" podStartSLOduration=2.758770491 podStartE2EDuration="2.758770491s" podCreationTimestamp="2026-01-28 13:09:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:16.742159424 +0000 UTC m=+1383.654376472" watchObservedRunningTime="2026-01-28 13:09:16.758770491 +0000 UTC m=+1383.670987529" Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.759541 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" event={"ID":"4863c1ad-76a8-4892-a664-c7deee6ed995","Type":"ContainerStarted","Data":"b81354ae161679f02349312cf75a1c9e1a540d26882e99b5d96124bc5066fb9f"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.759612 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" event={"ID":"4863c1ad-76a8-4892-a664-c7deee6ed995","Type":"ContainerStarted","Data":"7328bd5b63ea5e717877c2f81f1dc550d0caa3b26520d83aad5a079d2940ffc6"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.787824 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-15f5-account-create-update-rd864" event={"ID":"c11380b3-750f-4d67-89de-5449903ecba9","Type":"ContainerStarted","Data":"f0824b49077eeec8e6cb0cf213b42661d575cca9ecdc679edee1dcbb91abb8bf"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.787869 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-15f5-account-create-update-rd864" event={"ID":"c11380b3-750f-4d67-89de-5449903ecba9","Type":"ContainerStarted","Data":"a0f6501ac17c4efff8881c612f1e7cae5754da9792dbca5aa5d394d43055641c"} Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.868937 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-b3ce-account-create-update-vpck6" podStartSLOduration=2.868905073 podStartE2EDuration="2.868905073s" podCreationTimestamp="2026-01-28 13:09:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:16.795868042 +0000 UTC m=+1383.708085080" watchObservedRunningTime="2026-01-28 13:09:16.868905073 +0000 UTC m=+1383.781122111" Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.886149 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-15f5-account-create-update-rd864" podStartSLOduration=2.886113937 
podStartE2EDuration="2.886113937s" podCreationTimestamp="2026-01-28 13:09:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:16.820925272 +0000 UTC m=+1383.733142310" watchObservedRunningTime="2026-01-28 13:09:16.886113937 +0000 UTC m=+1383.798330995" Jan 28 13:09:16 crc kubenswrapper[4848]: I0128 13:09:16.968716 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" podStartSLOduration=2.968678299 podStartE2EDuration="2.968678299s" podCreationTimestamp="2026-01-28 13:09:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:16.859133173 +0000 UTC m=+1383.771350211" watchObservedRunningTime="2026-01-28 13:09:16.968678299 +0000 UTC m=+1383.880895327" Jan 28 13:09:17 crc kubenswrapper[4848]: E0128 13:09:17.187108 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-conmon-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81c457b6_7dbc_41b5_ba65_227b2bc7492f.slice/crio-4c073da8e3337b45af4f84c2af9f68e9123607f84283718c7e3082044e5ac82b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81c457b6_7dbc_41b5_ba65_227b2bc7492f.slice/crio-conmon-4c073da8e3337b45af4f84c2af9f68e9123607f84283718c7e3082044e5ac82b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4863c1ad_76a8_4892_a664_c7deee6ed995.slice/crio-conmon-b81354ae161679f02349312cf75a1c9e1a540d26882e99b5d96124bc5066fb9f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4863c1ad_76a8_4892_a664_c7deee6ed995.slice/crio-b81354ae161679f02349312cf75a1c9e1a540d26882e99b5d96124bc5066fb9f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc11380b3_750f_4d67_89de_5449903ecba9.slice/crio-conmon-f0824b49077eeec8e6cb0cf213b42661d575cca9ecdc679edee1dcbb91abb8bf.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9a381e2_822b_4682_9b0a_602997cf8a74.slice/crio-conmon-1d08abb1260dd36dc5beab0a1a999d5ed404cf6ad84d269670b995997f3c1848.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc11380b3_750f_4d67_89de_5449903ecba9.slice/crio-f0824b49077eeec8e6cb0cf213b42661d575cca9ecdc679edee1dcbb91abb8bf.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.800312 4848 generic.go:334] "Generic (PLEG): container finished" podID="4863c1ad-76a8-4892-a664-c7deee6ed995" 
containerID="b81354ae161679f02349312cf75a1c9e1a540d26882e99b5d96124bc5066fb9f" exitCode=0 Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.800735 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" event={"ID":"4863c1ad-76a8-4892-a664-c7deee6ed995","Type":"ContainerDied","Data":"b81354ae161679f02349312cf75a1c9e1a540d26882e99b5d96124bc5066fb9f"} Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.804342 4848 generic.go:334] "Generic (PLEG): container finished" podID="c11380b3-750f-4d67-89de-5449903ecba9" containerID="f0824b49077eeec8e6cb0cf213b42661d575cca9ecdc679edee1dcbb91abb8bf" exitCode=0 Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.804414 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-15f5-account-create-update-rd864" event={"ID":"c11380b3-750f-4d67-89de-5449903ecba9","Type":"ContainerDied","Data":"f0824b49077eeec8e6cb0cf213b42661d575cca9ecdc679edee1dcbb91abb8bf"} Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.806980 4848 generic.go:334] "Generic (PLEG): container finished" podID="31edbc2f-7790-429e-a6b0-6f87c88ae72f" containerID="cc3ba5f023093e8d8f0907f78f6779698ce152fe455974dd128ebfc9b3f89bd5" exitCode=0 Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.807072 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pkbf5" event={"ID":"31edbc2f-7790-429e-a6b0-6f87c88ae72f","Type":"ContainerDied","Data":"cc3ba5f023093e8d8f0907f78f6779698ce152fe455974dd128ebfc9b3f89bd5"} Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.809505 4848 generic.go:334] "Generic (PLEG): container finished" podID="3b57b9b0-bb70-43d2-a97f-7e0372e3971c" containerID="edf32bc74c1e8aafe6e88b62505f05ec787f1cbdd02deaf06a232c7d0927069f" exitCode=0 Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.809603 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xvrs4" event={"ID":"3b57b9b0-bb70-43d2-a97f-7e0372e3971c","Type":"ContainerDied","Data":"edf32bc74c1e8aafe6e88b62505f05ec787f1cbdd02deaf06a232c7d0927069f"} Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.812218 4848 generic.go:334] "Generic (PLEG): container finished" podID="81c457b6-7dbc-41b5-ba65-227b2bc7492f" containerID="4c073da8e3337b45af4f84c2af9f68e9123607f84283718c7e3082044e5ac82b" exitCode=0 Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.812291 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b3ce-account-create-update-vpck6" event={"ID":"81c457b6-7dbc-41b5-ba65-227b2bc7492f","Type":"ContainerDied","Data":"4c073da8e3337b45af4f84c2af9f68e9123607f84283718c7e3082044e5ac82b"} Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.814444 4848 generic.go:334] "Generic (PLEG): container finished" podID="c9a381e2-822b-4682-9b0a-602997cf8a74" containerID="1d08abb1260dd36dc5beab0a1a999d5ed404cf6ad84d269670b995997f3c1848" exitCode=0 Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.814501 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6g8rf" event={"ID":"c9a381e2-822b-4682-9b0a-602997cf8a74","Type":"ContainerDied","Data":"1d08abb1260dd36dc5beab0a1a999d5ed404cf6ad84d269670b995997f3c1848"} Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.850582 4848 scope.go:117] "RemoveContainer" containerID="ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1" Jan 28 13:09:17 crc kubenswrapper[4848]: I0128 13:09:17.940833 4848 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.368594 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.436545 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-operator-scripts\") pod \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.437165 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjrrj\" (UniqueName: \"kubernetes.io/projected/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-kube-api-access-zjrrj\") pod \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\" (UID: \"3b57b9b0-bb70-43d2-a97f-7e0372e3971c\") " Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.437510 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3b57b9b0-bb70-43d2-a97f-7e0372e3971c" (UID: "3b57b9b0-bb70-43d2-a97f-7e0372e3971c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.439661 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.450164 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-kube-api-access-zjrrj" (OuterVolumeSpecName: "kube-api-access-zjrrj") pod "3b57b9b0-bb70-43d2-a97f-7e0372e3971c" (UID: "3b57b9b0-bb70-43d2-a97f-7e0372e3971c"). InnerVolumeSpecName "kube-api-access-zjrrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.544807 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjrrj\" (UniqueName: \"kubernetes.io/projected/3b57b9b0-bb70-43d2-a97f-7e0372e3971c-kube-api-access-zjrrj\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.567627 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.647984 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brmff\" (UniqueName: \"kubernetes.io/projected/c9a381e2-822b-4682-9b0a-602997cf8a74-kube-api-access-brmff\") pod \"c9a381e2-822b-4682-9b0a-602997cf8a74\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.648302 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9a381e2-822b-4682-9b0a-602997cf8a74-operator-scripts\") pod \"c9a381e2-822b-4682-9b0a-602997cf8a74\" (UID: \"c9a381e2-822b-4682-9b0a-602997cf8a74\") " Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.649194 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9a381e2-822b-4682-9b0a-602997cf8a74-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c9a381e2-822b-4682-9b0a-602997cf8a74" (UID: "c9a381e2-822b-4682-9b0a-602997cf8a74"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.654552 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9a381e2-822b-4682-9b0a-602997cf8a74-kube-api-access-brmff" (OuterVolumeSpecName: "kube-api-access-brmff") pod "c9a381e2-822b-4682-9b0a-602997cf8a74" (UID: "c9a381e2-822b-4682-9b0a-602997cf8a74"). InnerVolumeSpecName "kube-api-access-brmff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.750834 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brmff\" (UniqueName: \"kubernetes.io/projected/c9a381e2-822b-4682-9b0a-602997cf8a74-kube-api-access-brmff\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.750875 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9a381e2-822b-4682-9b0a-602997cf8a74-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.844927 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerStarted","Data":"d7f6f1710a36a7bd2d73156d09139a7b10e823477ed7cb6590bb5bd3d6f2a90b"} Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.846809 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xvrs4" event={"ID":"3b57b9b0-bb70-43d2-a97f-7e0372e3971c","Type":"ContainerDied","Data":"602dfb9c3e8d353709e24230ac2d08673b0cd101582324e219d1edb28fb04f56"} Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.846858 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="602dfb9c3e8d353709e24230ac2d08673b0cd101582324e219d1edb28fb04f56" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.846961 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xvrs4" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.861721 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-6g8rf" Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.889217 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6g8rf" event={"ID":"c9a381e2-822b-4682-9b0a-602997cf8a74","Type":"ContainerDied","Data":"ab2f12c914e390b78ab5892d7bfe78a6c4162ace4b2c275483f03a4451745498"} Jan 28 13:09:18 crc kubenswrapper[4848]: I0128 13:09:18.889286 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab2f12c914e390b78ab5892d7bfe78a6c4162ace4b2c275483f03a4451745498" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.318568 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.318623 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.372115 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.401901 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.471624 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9qll\" (UniqueName: \"kubernetes.io/projected/81c457b6-7dbc-41b5-ba65-227b2bc7492f-kube-api-access-l9qll\") pod \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.472195 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81c457b6-7dbc-41b5-ba65-227b2bc7492f-operator-scripts\") pod \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\" (UID: \"81c457b6-7dbc-41b5-ba65-227b2bc7492f\") " Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.474236 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81c457b6-7dbc-41b5-ba65-227b2bc7492f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81c457b6-7dbc-41b5-ba65-227b2bc7492f" (UID: "81c457b6-7dbc-41b5-ba65-227b2bc7492f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.485065 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81c457b6-7dbc-41b5-ba65-227b2bc7492f-kube-api-access-l9qll" (OuterVolumeSpecName: "kube-api-access-l9qll") pod "81c457b6-7dbc-41b5-ba65-227b2bc7492f" (UID: "81c457b6-7dbc-41b5-ba65-227b2bc7492f"). InnerVolumeSpecName "kube-api-access-l9qll". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.577765 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9qll\" (UniqueName: \"kubernetes.io/projected/81c457b6-7dbc-41b5-ba65-227b2bc7492f-kube-api-access-l9qll\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.577812 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81c457b6-7dbc-41b5-ba65-227b2bc7492f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.680601 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.786334 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9dgp\" (UniqueName: \"kubernetes.io/projected/4863c1ad-76a8-4892-a664-c7deee6ed995-kube-api-access-q9dgp\") pod \"4863c1ad-76a8-4892-a664-c7deee6ed995\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.794500 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4863c1ad-76a8-4892-a664-c7deee6ed995-operator-scripts\") pod \"4863c1ad-76a8-4892-a664-c7deee6ed995\" (UID: \"4863c1ad-76a8-4892-a664-c7deee6ed995\") " Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.796154 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4863c1ad-76a8-4892-a664-c7deee6ed995-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4863c1ad-76a8-4892-a664-c7deee6ed995" (UID: "4863c1ad-76a8-4892-a664-c7deee6ed995"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.802644 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4863c1ad-76a8-4892-a664-c7deee6ed995-kube-api-access-q9dgp" (OuterVolumeSpecName: "kube-api-access-q9dgp") pod "4863c1ad-76a8-4892-a664-c7deee6ed995" (UID: "4863c1ad-76a8-4892-a664-c7deee6ed995"). InnerVolumeSpecName "kube-api-access-q9dgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.832912 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.889767 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.901346 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fwld\" (UniqueName: \"kubernetes.io/projected/31edbc2f-7790-429e-a6b0-6f87c88ae72f-kube-api-access-8fwld\") pod \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.901407 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31edbc2f-7790-429e-a6b0-6f87c88ae72f-operator-scripts\") pod \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\" (UID: \"31edbc2f-7790-429e-a6b0-6f87c88ae72f\") " Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.904494 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9dgp\" (UniqueName: \"kubernetes.io/projected/4863c1ad-76a8-4892-a664-c7deee6ed995-kube-api-access-q9dgp\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.904523 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4863c1ad-76a8-4892-a664-c7deee6ed995-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.908219 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31edbc2f-7790-429e-a6b0-6f87c88ae72f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "31edbc2f-7790-429e-a6b0-6f87c88ae72f" (UID: "31edbc2f-7790-429e-a6b0-6f87c88ae72f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.911212 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31edbc2f-7790-429e-a6b0-6f87c88ae72f-kube-api-access-8fwld" (OuterVolumeSpecName: "kube-api-access-8fwld") pod "31edbc2f-7790-429e-a6b0-6f87c88ae72f" (UID: "31edbc2f-7790-429e-a6b0-6f87c88ae72f"). InnerVolumeSpecName "kube-api-access-8fwld". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.922959 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b3ce-account-create-update-vpck6" event={"ID":"81c457b6-7dbc-41b5-ba65-227b2bc7492f","Type":"ContainerDied","Data":"c2d32e790d61387ee0db10aa7149cbcea3f2dee58b6b73944c510ce8114cf1e5"} Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.923015 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2d32e790d61387ee0db10aa7149cbcea3f2dee58b6b73944c510ce8114cf1e5" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.923091 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b3ce-account-create-update-vpck6" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.939514 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.939542 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0dd7-account-create-update-hbr27" event={"ID":"4863c1ad-76a8-4892-a664-c7deee6ed995","Type":"ContainerDied","Data":"7328bd5b63ea5e717877c2f81f1dc550d0caa3b26520d83aad5a079d2940ffc6"} Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.939591 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7328bd5b63ea5e717877c2f81f1dc550d0caa3b26520d83aad5a079d2940ffc6" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.948587 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-15f5-account-create-update-rd864" event={"ID":"c11380b3-750f-4d67-89de-5449903ecba9","Type":"ContainerDied","Data":"a0f6501ac17c4efff8881c612f1e7cae5754da9792dbca5aa5d394d43055641c"} Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.948645 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0f6501ac17c4efff8881c612f1e7cae5754da9792dbca5aa5d394d43055641c" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.948734 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-15f5-account-create-update-rd864" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.953529 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pkbf5" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.954489 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pkbf5" event={"ID":"31edbc2f-7790-429e-a6b0-6f87c88ae72f","Type":"ContainerDied","Data":"7946be2e43ed6d10380de494a1ad8d91c91e576cf53a5f3c713fe6a6ea32cc64"} Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.954583 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7946be2e43ed6d10380de494a1ad8d91c91e576cf53a5f3c713fe6a6ea32cc64" Jan 28 13:09:19 crc kubenswrapper[4848]: I0128 13:09:19.993079 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.006762 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9tvr\" (UniqueName: \"kubernetes.io/projected/c11380b3-750f-4d67-89de-5449903ecba9-kube-api-access-w9tvr\") pod \"c11380b3-750f-4d67-89de-5449903ecba9\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.006922 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c11380b3-750f-4d67-89de-5449903ecba9-operator-scripts\") pod \"c11380b3-750f-4d67-89de-5449903ecba9\" (UID: \"c11380b3-750f-4d67-89de-5449903ecba9\") " Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.007588 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fwld\" (UniqueName: \"kubernetes.io/projected/31edbc2f-7790-429e-a6b0-6f87c88ae72f-kube-api-access-8fwld\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.007601 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31edbc2f-7790-429e-a6b0-6f87c88ae72f-operator-scripts\") on node \"crc\" DevicePath \"\"" 
Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.009018 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c11380b3-750f-4d67-89de-5449903ecba9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c11380b3-750f-4d67-89de-5449903ecba9" (UID: "c11380b3-750f-4d67-89de-5449903ecba9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.027742 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c11380b3-750f-4d67-89de-5449903ecba9-kube-api-access-w9tvr" (OuterVolumeSpecName: "kube-api-access-w9tvr") pod "c11380b3-750f-4d67-89de-5449903ecba9" (UID: "c11380b3-750f-4d67-89de-5449903ecba9"). InnerVolumeSpecName "kube-api-access-w9tvr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.110535 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9tvr\" (UniqueName: \"kubernetes.io/projected/c11380b3-750f-4d67-89de-5449903ecba9-kube-api-access-w9tvr\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:20 crc kubenswrapper[4848]: I0128 13:09:20.110605 4848 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c11380b3-750f-4d67-89de-5449903ecba9-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.913136 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8rd4v"]
Jan 28 13:09:24 crc kubenswrapper[4848]: E0128 13:09:24.914457 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9a381e2-822b-4682-9b0a-602997cf8a74" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914476 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9a381e2-822b-4682-9b0a-602997cf8a74" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: E0128 13:09:24.914496 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b57b9b0-bb70-43d2-a97f-7e0372e3971c" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914503 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b57b9b0-bb70-43d2-a97f-7e0372e3971c" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: E0128 13:09:24.914516 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81c457b6-7dbc-41b5-ba65-227b2bc7492f" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914523 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="81c457b6-7dbc-41b5-ba65-227b2bc7492f" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: E0128 13:09:24.914539 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c11380b3-750f-4d67-89de-5449903ecba9" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914547 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c11380b3-750f-4d67-89de-5449903ecba9" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: E0128 13:09:24.914560 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4863c1ad-76a8-4892-a664-c7deee6ed995" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914568 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4863c1ad-76a8-4892-a664-c7deee6ed995" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: E0128 13:09:24.914598 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31edbc2f-7790-429e-a6b0-6f87c88ae72f" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914606 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="31edbc2f-7790-429e-a6b0-6f87c88ae72f" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914827 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c11380b3-750f-4d67-89de-5449903ecba9" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914849 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4863c1ad-76a8-4892-a664-c7deee6ed995" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914860 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="31edbc2f-7790-429e-a6b0-6f87c88ae72f" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914873 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9a381e2-822b-4682-9b0a-602997cf8a74" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914881 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b57b9b0-bb70-43d2-a97f-7e0372e3971c" containerName="mariadb-database-create"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.914890 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="81c457b6-7dbc-41b5-ba65-227b2bc7492f" containerName="mariadb-account-create-update"
Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.915862 4848 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.922089 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.926811 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hbjw8" Jan 28 13:09:24 crc kubenswrapper[4848]: I0128 13:09:24.927049 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.025388 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8rd4v"] Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.030821 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-config-data\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.030904 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-scripts\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.030961 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6nsr\" (UniqueName: \"kubernetes.io/projected/c1ace158-4e32-4a9a-b350-4afddceb574c-kube-api-access-l6nsr\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.031118 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.132836 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.132966 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-config-data\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.132997 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-scripts\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: 
\"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.133032 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6nsr\" (UniqueName: \"kubernetes.io/projected/c1ace158-4e32-4a9a-b350-4afddceb574c-kube-api-access-l6nsr\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.142731 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-scripts\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.142868 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.143772 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-config-data\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.153669 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6nsr\" (UniqueName: \"kubernetes.io/projected/c1ace158-4e32-4a9a-b350-4afddceb574c-kube-api-access-l6nsr\") pod \"nova-cell0-conductor-db-sync-8rd4v\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.264680 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:09:25 crc kubenswrapper[4848]: W0128 13:09:25.771762 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1ace158_4e32_4a9a_b350_4afddceb574c.slice/crio-00a501e6da790970f9ca2a24d34ee18b3785ab2aafeba462c5c09eb374c21c0e WatchSource:0}: Error finding container 00a501e6da790970f9ca2a24d34ee18b3785ab2aafeba462c5c09eb374c21c0e: Status 404 returned error can't find the container with id 00a501e6da790970f9ca2a24d34ee18b3785ab2aafeba462c5c09eb374c21c0e Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.775594 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:09:25 crc kubenswrapper[4848]: I0128 13:09:25.796427 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8rd4v"] Jan 28 13:09:26 crc kubenswrapper[4848]: I0128 13:09:26.028397 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" event={"ID":"c1ace158-4e32-4a9a-b350-4afddceb574c","Type":"ContainerStarted","Data":"00a501e6da790970f9ca2a24d34ee18b3785ab2aafeba462c5c09eb374c21c0e"} Jan 28 13:09:27 crc kubenswrapper[4848]: E0128 13:09:27.441600 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-conmon-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:09:32 crc kubenswrapper[4848]: I0128 13:09:32.655856 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 13:09:37 crc kubenswrapper[4848]: I0128 13:09:37.684011 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 28 13:09:37 crc kubenswrapper[4848]: I0128 13:09:37.685116 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" containerID="cri-o://d7f6f1710a36a7bd2d73156d09139a7b10e823477ed7cb6590bb5bd3d6f2a90b" gracePeriod=30 Jan 28 13:09:37 crc kubenswrapper[4848]: E0128 13:09:37.751659 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-conmon-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:09:38 crc kubenswrapper[4848]: I0128 13:09:38.203193 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-conductor-db-sync-8rd4v" event={"ID":"c1ace158-4e32-4a9a-b350-4afddceb574c","Type":"ContainerStarted","Data":"7ac8c112211d7b6ad12d0f8daf1a5d6983909337df04ebfa23aafb9bb60e5ed1"} Jan 28 13:09:38 crc kubenswrapper[4848]: I0128 13:09:38.232769 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" podStartSLOduration=2.761964898 podStartE2EDuration="14.232724564s" podCreationTimestamp="2026-01-28 13:09:24 +0000 UTC" firstStartedPulling="2026-01-28 13:09:25.775327658 +0000 UTC m=+1392.687544706" lastFinishedPulling="2026-01-28 13:09:37.246087334 +0000 UTC m=+1404.158304372" observedRunningTime="2026-01-28 13:09:38.224077766 +0000 UTC m=+1405.136294824" watchObservedRunningTime="2026-01-28 13:09:38.232724564 +0000 UTC m=+1405.144941602" Jan 28 13:09:38 crc kubenswrapper[4848]: I0128 13:09:38.900969 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013259 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-scripts\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013350 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhwx4\" (UniqueName: \"kubernetes.io/projected/6d1240ae-2011-41cb-90a2-6f050020e305-kube-api-access-fhwx4\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013429 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-run-httpd\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013478 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-sg-core-conf-yaml\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013522 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-log-httpd\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013578 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-config-data\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.013844 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle\") pod \"6d1240ae-2011-41cb-90a2-6f050020e305\" (UID: \"6d1240ae-2011-41cb-90a2-6f050020e305\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.015162 4848 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.015563 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.029673 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-scripts" (OuterVolumeSpecName: "scripts") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.037476 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d1240ae-2011-41cb-90a2-6f050020e305-kube-api-access-fhwx4" (OuterVolumeSpecName: "kube-api-access-fhwx4") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "kube-api-access-fhwx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.062815 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.119175 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.119239 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhwx4\" (UniqueName: \"kubernetes.io/projected/6d1240ae-2011-41cb-90a2-6f050020e305-kube-api-access-fhwx4\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.119272 4848 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.119281 4848 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.119290 4848 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d1240ae-2011-41cb-90a2-6f050020e305-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.119434 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.182216 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-config-data" (OuterVolumeSpecName: "config-data") pod "6d1240ae-2011-41cb-90a2-6f050020e305" (UID: "6d1240ae-2011-41cb-90a2-6f050020e305"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.226014 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.226049 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d1240ae-2011-41cb-90a2-6f050020e305-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.230543 4848 generic.go:334] "Generic (PLEG): container finished" podID="88151fad-4442-4d32-a675-f89f070ed086" containerID="d7f6f1710a36a7bd2d73156d09139a7b10e823477ed7cb6590bb5bd3d6f2a90b" exitCode=0 Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.230604 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerDied","Data":"d7f6f1710a36a7bd2d73156d09139a7b10e823477ed7cb6590bb5bd3d6f2a90b"} Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.230655 4848 scope.go:117] "RemoveContainer" containerID="ce88f840eeef85f472f5159f808ea0c47867c11369859cddc8b8ed624a03bad1" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.242578 4848 generic.go:334] "Generic (PLEG): container finished" podID="6d1240ae-2011-41cb-90a2-6f050020e305" containerID="248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d" exitCode=137 Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.242842 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.243340 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerDied","Data":"248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d"} Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.243412 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d1240ae-2011-41cb-90a2-6f050020e305","Type":"ContainerDied","Data":"cb1be744ec3f12f54559009d0d95fb359a966eba3665ebf39126ea6afcd18222"} Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.309235 4848 scope.go:117] "RemoveContainer" containerID="248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.326087 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.347430 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.356982 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.357646 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="sg-core" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357667 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="sg-core" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.357686 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="proxy-httpd" 
Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357692 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="proxy-httpd" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.357712 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-central-agent" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357720 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-central-agent" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.357743 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-notification-agent" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357752 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-notification-agent" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357954 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="sg-core" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357972 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-notification-agent" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357981 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="ceilometer-central-agent" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.357994 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" containerName="proxy-httpd" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.360557 4848 scope.go:117] "RemoveContainer" containerID="932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.363934 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.369902 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.370095 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.372065 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.422710 4848 scope.go:117] "RemoveContainer" containerID="cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.441392 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.487442 4848 scope.go:117] "RemoveContainer" containerID="1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.531872 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5cjn\" (UniqueName: \"kubernetes.io/projected/88151fad-4442-4d32-a675-f89f070ed086-kube-api-access-h5cjn\") pod \"88151fad-4442-4d32-a675-f89f070ed086\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.531964 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-combined-ca-bundle\") pod \"88151fad-4442-4d32-a675-f89f070ed086\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532069 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-config-data\") pod \"88151fad-4442-4d32-a675-f89f070ed086\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532138 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88151fad-4442-4d32-a675-f89f070ed086-logs\") pod \"88151fad-4442-4d32-a675-f89f070ed086\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532184 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-custom-prometheus-ca\") pod \"88151fad-4442-4d32-a675-f89f070ed086\" (UID: \"88151fad-4442-4d32-a675-f89f070ed086\") " Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532621 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-run-httpd\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532705 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g54d2\" (UniqueName: \"kubernetes.io/projected/a8c06e17-b640-4cd3-9574-11cbb37abd2a-kube-api-access-g54d2\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532757 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-config-data\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532779 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc 
kubenswrapper[4848]: I0128 13:09:39.532834 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532921 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-log-httpd\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.532997 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-scripts\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.533829 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88151fad-4442-4d32-a675-f89f070ed086-logs" (OuterVolumeSpecName: "logs") pod "88151fad-4442-4d32-a675-f89f070ed086" (UID: "88151fad-4442-4d32-a675-f89f070ed086"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.538824 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88151fad-4442-4d32-a675-f89f070ed086-kube-api-access-h5cjn" (OuterVolumeSpecName: "kube-api-access-h5cjn") pod "88151fad-4442-4d32-a675-f89f070ed086" (UID: "88151fad-4442-4d32-a675-f89f070ed086"). InnerVolumeSpecName "kube-api-access-h5cjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.549368 4848 scope.go:117] "RemoveContainer" containerID="248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.557703 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d\": container with ID starting with 248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d not found: ID does not exist" containerID="248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.557815 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d"} err="failed to get container status \"248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d\": rpc error: code = NotFound desc = could not find container \"248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d\": container with ID starting with 248be5e3947a8bad2d8bf6390c0df78826312ba056358c41d5cc6043b9a3f74d not found: ID does not exist" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.557870 4848 scope.go:117] "RemoveContainer" containerID="932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.562137 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531\": container with ID starting with 932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531 not found: ID does not exist" containerID="932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.562225 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531"} err="failed to get container status \"932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531\": rpc error: code = NotFound desc = could not find container \"932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531\": container with ID starting with 932770d27ca40eac6014e7e0cd845ae3eb0a5aef787177c06eab047746e4a531 not found: ID does not exist" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.562278 4848 scope.go:117] "RemoveContainer" containerID="cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.565455 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac\": container with ID starting with cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac not found: ID does not exist" containerID="cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.565507 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac"} err="failed to get container status \"cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac\": rpc error: code = NotFound desc = could not 
find container \"cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac\": container with ID starting with cd236419c2523b2deab59cb5b6c9812c35a711ef5a45e89a6120252db8f827ac not found: ID does not exist" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.565542 4848 scope.go:117] "RemoveContainer" containerID="1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2" Jan 28 13:09:39 crc kubenswrapper[4848]: E0128 13:09:39.566164 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2\": container with ID starting with 1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2 not found: ID does not exist" containerID="1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.566228 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2"} err="failed to get container status \"1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2\": rpc error: code = NotFound desc = could not find container \"1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2\": container with ID starting with 1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2 not found: ID does not exist" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.579396 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88151fad-4442-4d32-a675-f89f070ed086" (UID: "88151fad-4442-4d32-a675-f89f070ed086"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.592861 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "88151fad-4442-4d32-a675-f89f070ed086" (UID: "88151fad-4442-4d32-a675-f89f070ed086"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.596004 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-config-data" (OuterVolumeSpecName: "config-data") pod "88151fad-4442-4d32-a675-f89f070ed086" (UID: "88151fad-4442-4d32-a675-f89f070ed086"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635004 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-scripts\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635082 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-run-httpd\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635146 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g54d2\" (UniqueName: \"kubernetes.io/projected/a8c06e17-b640-4cd3-9574-11cbb37abd2a-kube-api-access-g54d2\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635208 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-config-data\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635226 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635281 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635345 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-log-httpd\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635434 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5cjn\" (UniqueName: \"kubernetes.io/projected/88151fad-4442-4d32-a675-f89f070ed086-kube-api-access-h5cjn\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635451 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635462 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635473 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/88151fad-4442-4d32-a675-f89f070ed086-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635511 4848 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/88151fad-4442-4d32-a675-f89f070ed086-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635850 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-run-httpd\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.635964 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-log-httpd\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.644697 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.645471 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-config-data\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.647399 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-scripts\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.652965 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.666324 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g54d2\" (UniqueName: \"kubernetes.io/projected/a8c06e17-b640-4cd3-9574-11cbb37abd2a-kube-api-access-g54d2\") pod \"ceilometer-0\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " pod="openstack/ceilometer-0" Jan 28 13:09:39 crc kubenswrapper[4848]: I0128 13:09:39.694213 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.258945 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"88151fad-4442-4d32-a675-f89f070ed086","Type":"ContainerDied","Data":"b6c3c8f29aa01731f355045457e1becd67b5e1ea57ddf758ea5483e25f3c40a5"} Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.258994 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.259430 4848 scope.go:117] "RemoveContainer" containerID="d7f6f1710a36a7bd2d73156d09139a7b10e823477ed7cb6590bb5bd3d6f2a90b" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.307517 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.320330 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.336271 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.353057 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 28 13:09:40 crc kubenswrapper[4848]: E0128 13:09:40.353685 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.353708 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: E0128 13:09:40.353733 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.353742 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: E0128 13:09:40.353753 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.353759 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.353982 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.354005 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.354018 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.354917 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.359494 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.377078 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 28 13:09:40 crc kubenswrapper[4848]: W0128 13:09:40.393564 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8c06e17_b640_4cd3_9574_11cbb37abd2a.slice/crio-b533a1fc759b4adddd1d6b596921a1a40fba2b9d03dcdfa4dd74fd723defbb3c WatchSource:0}: Error finding container b533a1fc759b4adddd1d6b596921a1a40fba2b9d03dcdfa4dd74fd723defbb3c: Status 404 returned error can't find the container with id b533a1fc759b4adddd1d6b596921a1a40fba2b9d03dcdfa4dd74fd723defbb3c Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.457534 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.457590 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dl7r\" (UniqueName: \"kubernetes.io/projected/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-kube-api-access-2dl7r\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.457638 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-logs\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.457675 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-config-data\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.458208 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.560332 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.560411 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dl7r\" (UniqueName: 
\"kubernetes.io/projected/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-kube-api-access-2dl7r\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.560482 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-logs\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.560521 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-config-data\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.560649 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.561119 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-logs\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.570084 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.572928 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.578579 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-config-data\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.580406 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dl7r\" (UniqueName: \"kubernetes.io/projected/c4b63577-cac1-4fce-bdca-c0b5a5d6c646-kube-api-access-2dl7r\") pod \"watcher-decision-engine-0\" (UID: \"c4b63577-cac1-4fce-bdca-c0b5a5d6c646\") " pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.700318 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.704510 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.863617 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d1240ae-2011-41cb-90a2-6f050020e305" path="/var/lib/kubelet/pods/6d1240ae-2011-41cb-90a2-6f050020e305/volumes" Jan 28 13:09:40 crc kubenswrapper[4848]: I0128 13:09:40.865407 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88151fad-4442-4d32-a675-f89f070ed086" path="/var/lib/kubelet/pods/88151fad-4442-4d32-a675-f89f070ed086/volumes" Jan 28 13:09:41 crc kubenswrapper[4848]: I0128 13:09:41.269412 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 28 13:09:41 crc kubenswrapper[4848]: I0128 13:09:41.273290 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerStarted","Data":"34d88ca7447387fa85383c08aa3b97091dadd666344f329ef1871c43238d4a95"} Jan 28 13:09:41 crc kubenswrapper[4848]: I0128 13:09:41.273358 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerStarted","Data":"b533a1fc759b4adddd1d6b596921a1a40fba2b9d03dcdfa4dd74fd723defbb3c"} Jan 28 13:09:42 crc kubenswrapper[4848]: I0128 13:09:42.288394 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"c4b63577-cac1-4fce-bdca-c0b5a5d6c646","Type":"ContainerStarted","Data":"3f32d64ee3b94a0460975066fa2cb2db3b23b55a1518ef76c34daa4a8834a5ce"} Jan 28 13:09:42 crc kubenswrapper[4848]: I0128 13:09:42.288860 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"c4b63577-cac1-4fce-bdca-c0b5a5d6c646","Type":"ContainerStarted","Data":"2d6303e449015307830553fba75326867ec8aaefdc4e40a0606ca24cb18351f2"} Jan 28 13:09:42 crc kubenswrapper[4848]: I0128 13:09:42.314629 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.314594209 podStartE2EDuration="2.314594209s" podCreationTimestamp="2026-01-28 13:09:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:42.305081348 +0000 UTC m=+1409.217298396" watchObservedRunningTime="2026-01-28 13:09:42.314594209 +0000 UTC m=+1409.226811247" Jan 28 13:09:43 crc kubenswrapper[4848]: I0128 13:09:43.301196 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerStarted","Data":"ff6bcd020e1d5b86640179b0cf8c475ec083f755cb9aefff9fee43ff95ff1da2"} Jan 28 13:09:43 crc kubenswrapper[4848]: I0128 13:09:43.301667 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerStarted","Data":"a129e1f80e2f4594d9eb1de7a451a1b981081b1263d8f3a82c215ab2910073b8"} Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.338104 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerStarted","Data":"c4f6ae1377600fc12b1953698d273ef9327508721256a58a7f4f2a6d66948e14"} Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.338989 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-central-agent" containerID="cri-o://34d88ca7447387fa85383c08aa3b97091dadd666344f329ef1871c43238d4a95" gracePeriod=30 Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.339091 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.339554 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="proxy-httpd" containerID="cri-o://c4f6ae1377600fc12b1953698d273ef9327508721256a58a7f4f2a6d66948e14" gracePeriod=30 Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.339602 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="sg-core" containerID="cri-o://a129e1f80e2f4594d9eb1de7a451a1b981081b1263d8f3a82c215ab2910073b8" gracePeriod=30 Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.339635 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-notification-agent" containerID="cri-o://ff6bcd020e1d5b86640179b0cf8c475ec083f755cb9aefff9fee43ff95ff1da2" gracePeriod=30 Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.419198 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.419618 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-log" containerID="cri-o://126185a13636e2d0ca9391e7cf781af2b1b7382ce765c4cd4c74772ff11e8d66" gracePeriod=30 Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.419819 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-httpd" containerID="cri-o://40b2809b9b20bc5aa0aec621b2cc722241b819605a3fe3834599709d35f7df98" gracePeriod=30 Jan 28 13:09:45 crc kubenswrapper[4848]: I0128 13:09:45.432839 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.41371306 podStartE2EDuration="6.432784066s" podCreationTimestamp="2026-01-28 13:09:39 +0000 UTC" firstStartedPulling="2026-01-28 13:09:40.399395488 +0000 UTC m=+1407.311612526" lastFinishedPulling="2026-01-28 13:09:44.418466494 +0000 UTC m=+1411.330683532" observedRunningTime="2026-01-28 13:09:45.391998493 +0000 UTC m=+1412.304215531" watchObservedRunningTime="2026-01-28 13:09:45.432784066 +0000 UTC m=+1412.345001094" Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.357221 4848 generic.go:334] "Generic (PLEG): container finished" podID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerID="a129e1f80e2f4594d9eb1de7a451a1b981081b1263d8f3a82c215ab2910073b8" exitCode=2 Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.357683 4848 generic.go:334] "Generic 
(PLEG): container finished" podID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerID="ff6bcd020e1d5b86640179b0cf8c475ec083f755cb9aefff9fee43ff95ff1da2" exitCode=0 Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.357492 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerDied","Data":"a129e1f80e2f4594d9eb1de7a451a1b981081b1263d8f3a82c215ab2910073b8"} Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.357785 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerDied","Data":"ff6bcd020e1d5b86640179b0cf8c475ec083f755cb9aefff9fee43ff95ff1da2"} Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.362724 4848 generic.go:334] "Generic (PLEG): container finished" podID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerID="126185a13636e2d0ca9391e7cf781af2b1b7382ce765c4cd4c74772ff11e8d66" exitCode=143 Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.362791 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6562fac3-e8b3-409d-b81c-aba6bef140d4","Type":"ContainerDied","Data":"126185a13636e2d0ca9391e7cf781af2b1b7382ce765c4cd4c74772ff11e8d66"} Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.939460 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.940234 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-httpd" containerID="cri-o://4e8c6fe2e074bebca1e2d365486caf9924f0b0121bb4b8bed8fc014f8ccb606b" gracePeriod=30 Jan 28 13:09:46 crc kubenswrapper[4848]: I0128 13:09:46.940200 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-log" containerID="cri-o://b5f8610f4564ec14c03df821d40a2326cec0eab73349364dd920936273f075a1" gracePeriod=30 Jan 28 13:09:47 crc kubenswrapper[4848]: I0128 13:09:47.376181 4848 generic.go:334] "Generic (PLEG): container finished" podID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerID="40b2809b9b20bc5aa0aec621b2cc722241b819605a3fe3834599709d35f7df98" exitCode=0 Jan 28 13:09:47 crc kubenswrapper[4848]: I0128 13:09:47.376719 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6562fac3-e8b3-409d-b81c-aba6bef140d4","Type":"ContainerDied","Data":"40b2809b9b20bc5aa0aec621b2cc722241b819605a3fe3834599709d35f7df98"} Jan 28 13:09:47 crc kubenswrapper[4848]: I0128 13:09:47.392745 4848 generic.go:334] "Generic (PLEG): container finished" podID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerID="b5f8610f4564ec14c03df821d40a2326cec0eab73349364dd920936273f075a1" exitCode=143 Jan 28 13:09:47 crc kubenswrapper[4848]: I0128 13:09:47.392805 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc","Type":"ContainerDied","Data":"b5f8610f4564ec14c03df821d40a2326cec0eab73349364dd920936273f075a1"} Jan 28 13:09:47 crc kubenswrapper[4848]: I0128 13:09:47.963707 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.057126 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-public-tls-certs\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.057228 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-config-data\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.058448 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.058499 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-logs\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.058534 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-combined-ca-bundle\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.058587 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-httpd-run\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.058642 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7pg6\" (UniqueName: \"kubernetes.io/projected/6562fac3-e8b3-409d-b81c-aba6bef140d4-kube-api-access-r7pg6\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.058689 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-scripts\") pod \"6562fac3-e8b3-409d-b81c-aba6bef140d4\" (UID: \"6562fac3-e8b3-409d-b81c-aba6bef140d4\") " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.066235 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.067513 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-logs" (OuterVolumeSpecName: "logs") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.101521 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6562fac3-e8b3-409d-b81c-aba6bef140d4-kube-api-access-r7pg6" (OuterVolumeSpecName: "kube-api-access-r7pg6") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "kube-api-access-r7pg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.109938 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.110267 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-scripts" (OuterVolumeSpecName: "scripts") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: E0128 13:09:48.160564 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-conmon-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.163357 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.163395 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.163406 4848 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6562fac3-e8b3-409d-b81c-aba6bef140d4-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.163417 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7pg6\" (UniqueName: \"kubernetes.io/projected/6562fac3-e8b3-409d-b81c-aba6bef140d4-kube-api-access-r7pg6\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.163432 4848 
reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.166509 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.230552 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.266877 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.267350 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.275690 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.307451 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-config-data" (OuterVolumeSpecName: "config-data") pod "6562fac3-e8b3-409d-b81c-aba6bef140d4" (UID: "6562fac3-e8b3-409d-b81c-aba6bef140d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.371801 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.371846 4848 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6562fac3-e8b3-409d-b81c-aba6bef140d4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.409421 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6562fac3-e8b3-409d-b81c-aba6bef140d4","Type":"ContainerDied","Data":"d81ebaff6e78e8bf895bfc308968e139397a9ba65ad7ea5f8d99aac6765c8f35"} Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.409491 4848 scope.go:117] "RemoveContainer" containerID="40b2809b9b20bc5aa0aec621b2cc722241b819605a3fe3834599709d35f7df98" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.409500 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.477169 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.481455 4848 scope.go:117] "RemoveContainer" containerID="126185a13636e2d0ca9391e7cf781af2b1b7382ce765c4cd4c74772ff11e8d66" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.494346 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.541910 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:09:48 crc kubenswrapper[4848]: E0128 13:09:48.542770 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-httpd" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.542795 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-httpd" Jan 28 13:09:48 crc kubenswrapper[4848]: E0128 13:09:48.542858 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-log" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.542867 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-log" Jan 28 13:09:48 crc kubenswrapper[4848]: E0128 13:09:48.542889 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.542897 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.543287 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="88151fad-4442-4d32-a675-f89f070ed086" containerName="watcher-decision-engine" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.543320 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-log" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.543332 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" containerName="glance-httpd" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.548101 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.556357 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.556639 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.566664 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.689912 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-config-data\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.689984 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-logs\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.690007 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.690027 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-scripts\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.690620 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5j6g\" (UniqueName: \"kubernetes.io/projected/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-kube-api-access-j5j6g\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.690741 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.690901 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.691018 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.792787 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-config-data\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.792866 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-logs\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.792901 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.792931 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-scripts\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793004 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5j6g\" (UniqueName: \"kubernetes.io/projected/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-kube-api-access-j5j6g\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793059 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793140 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793207 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793431 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") 
pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793695 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-logs\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.793839 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.798966 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-scripts\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.803011 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.810582 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-config-data\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.810766 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.817934 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5j6g\" (UniqueName: \"kubernetes.io/projected/0ab62279-8f3a-4ad3-8de4-84c72ad421a1-kube-api-access-j5j6g\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.847276 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"0ab62279-8f3a-4ad3-8de4-84c72ad421a1\") " pod="openstack/glance-default-external-api-0" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.864271 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6562fac3-e8b3-409d-b81c-aba6bef140d4" path="/var/lib/kubelet/pods/6562fac3-e8b3-409d-b81c-aba6bef140d4/volumes" Jan 28 13:09:48 crc kubenswrapper[4848]: I0128 13:09:48.902566 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 13:09:49 crc kubenswrapper[4848]: I0128 13:09:49.623696 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 13:09:50 crc kubenswrapper[4848]: I0128 13:09:50.437081 4848 generic.go:334] "Generic (PLEG): container finished" podID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerID="4e8c6fe2e074bebca1e2d365486caf9924f0b0121bb4b8bed8fc014f8ccb606b" exitCode=0 Jan 28 13:09:50 crc kubenswrapper[4848]: I0128 13:09:50.437169 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc","Type":"ContainerDied","Data":"4e8c6fe2e074bebca1e2d365486caf9924f0b0121bb4b8bed8fc014f8ccb606b"} Jan 28 13:09:50 crc kubenswrapper[4848]: I0128 13:09:50.439302 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0ab62279-8f3a-4ad3-8de4-84c72ad421a1","Type":"ContainerStarted","Data":"1dfd60cb13aec8cdbda49a0eb0fee9e0038fca152f27b5b3e7457acb5b30fdad"} Jan 28 13:09:50 crc kubenswrapper[4848]: I0128 13:09:50.701131 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:50 crc kubenswrapper[4848]: I0128 13:09:50.802623 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.208148 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.272654 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-combined-ca-bundle\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.272802 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgtlc\" (UniqueName: \"kubernetes.io/projected/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-kube-api-access-jgtlc\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.272869 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.272934 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-httpd-run\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.272988 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-logs\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.273013 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-scripts\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.273070 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-internal-tls-certs\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.273118 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-config-data\") pod \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\" (UID: \"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc\") " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.273790 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.285560 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-logs" (OuterVolumeSpecName: "logs") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.312679 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-scripts" (OuterVolumeSpecName: "scripts") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.316466 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.316513 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-kube-api-access-jgtlc" (OuterVolumeSpecName: "kube-api-access-jgtlc") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "kube-api-access-jgtlc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.377226 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.377309 4848 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.377322 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.377335 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.377348 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgtlc\" (UniqueName: \"kubernetes.io/projected/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-kube-api-access-jgtlc\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.465504 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.473635 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-config-data" (OuterVolumeSpecName: "config-data") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.508034 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0ab62279-8f3a-4ad3-8de4-84c72ad421a1","Type":"ContainerStarted","Data":"dd2b7f01f65804bcf5cc15487519549d3060fac26f5f867550732cdc2b28628a"} Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.510513 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.510571 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.520242 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.520930 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3da661a9-7841-4ccb-aa19-2cdeb1be8dfc","Type":"ContainerDied","Data":"95856bb5d7bbfa319dd257381686a877ed26ede5e2557f68011e54760b5808a5"} Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.521058 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.521154 4848 scope.go:117] "RemoveContainer" containerID="4e8c6fe2e074bebca1e2d365486caf9924f0b0121bb4b8bed8fc014f8ccb606b" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.564681 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.613349 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.614098 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.667453 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" (UID: "3da661a9-7841-4ccb-aa19-2cdeb1be8dfc"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.672524 4848 scope.go:117] "RemoveContainer" containerID="b5f8610f4564ec14c03df821d40a2326cec0eab73349364dd920936273f075a1" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.715691 4848 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.862561 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.873630 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.914312 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:09:51 crc kubenswrapper[4848]: E0128 13:09:51.914895 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-log" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.914909 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-log" Jan 28 13:09:51 crc kubenswrapper[4848]: E0128 13:09:51.914942 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-httpd" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.914947 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-httpd" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.915155 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-httpd" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.915170 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" containerName="glance-log" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.916508 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.922894 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.923218 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 28 13:09:51 crc kubenswrapper[4848]: I0128 13:09:51.972186 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027448 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/03e938c1-a61a-4c60-9d8e-660cefebc2fc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027547 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027597 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027629 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03e938c1-a61a-4c60-9d8e-660cefebc2fc-logs\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027656 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqg6q\" (UniqueName: \"kubernetes.io/projected/03e938c1-a61a-4c60-9d8e-660cefebc2fc-kube-api-access-jqg6q\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027748 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027819 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.027854 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.129816 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.130387 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.130583 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/03e938c1-a61a-4c60-9d8e-660cefebc2fc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.130771 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.131230 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/03e938c1-a61a-4c60-9d8e-660cefebc2fc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.131539 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.136327 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.137826 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03e938c1-a61a-4c60-9d8e-660cefebc2fc-logs\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.137931 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqg6q\" (UniqueName: 
\"kubernetes.io/projected/03e938c1-a61a-4c60-9d8e-660cefebc2fc-kube-api-access-jqg6q\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.138549 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03e938c1-a61a-4c60-9d8e-660cefebc2fc-logs\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.140772 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.140783 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.141346 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.151603 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.158658 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03e938c1-a61a-4c60-9d8e-660cefebc2fc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.162785 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqg6q\" (UniqueName: \"kubernetes.io/projected/03e938c1-a61a-4c60-9d8e-660cefebc2fc-kube-api-access-jqg6q\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.181812 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"03e938c1-a61a-4c60-9d8e-660cefebc2fc\") " pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.278964 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.545356 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0ab62279-8f3a-4ad3-8de4-84c72ad421a1","Type":"ContainerStarted","Data":"9feef3d20f048d39c3a19c92978f60b821c62387f44c7b7ab863bca5c3e2e8aa"} Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.580966 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.58093963 podStartE2EDuration="4.58093963s" podCreationTimestamp="2026-01-28 13:09:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:52.577143895 +0000 UTC m=+1419.489360953" watchObservedRunningTime="2026-01-28 13:09:52.58093963 +0000 UTC m=+1419.493156668" Jan 28 13:09:52 crc kubenswrapper[4848]: I0128 13:09:52.958499 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3da661a9-7841-4ccb-aa19-2cdeb1be8dfc" path="/var/lib/kubelet/pods/3da661a9-7841-4ccb-aa19-2cdeb1be8dfc/volumes" Jan 28 13:09:53 crc kubenswrapper[4848]: I0128 13:09:53.117010 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 13:09:53 crc kubenswrapper[4848]: W0128 13:09:53.124385 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03e938c1_a61a_4c60_9d8e_660cefebc2fc.slice/crio-8b9902b8a0af3bd966e25c7593a90c165a23b67450b09a5edc80b20c262771c0 WatchSource:0}: Error finding container 8b9902b8a0af3bd966e25c7593a90c165a23b67450b09a5edc80b20c262771c0: Status 404 returned error can't find the container with id 8b9902b8a0af3bd966e25c7593a90c165a23b67450b09a5edc80b20c262771c0 Jan 28 13:09:53 crc kubenswrapper[4848]: I0128 13:09:53.567787 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"03e938c1-a61a-4c60-9d8e-660cefebc2fc","Type":"ContainerStarted","Data":"8b9902b8a0af3bd966e25c7593a90c165a23b67450b09a5edc80b20c262771c0"} Jan 28 13:09:54 crc kubenswrapper[4848]: I0128 13:09:54.579166 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"03e938c1-a61a-4c60-9d8e-660cefebc2fc","Type":"ContainerStarted","Data":"ac2081e275ecd530ae913fb986628f20e606ef947b6a6f77872f2ea53add73d4"} Jan 28 13:09:54 crc kubenswrapper[4848]: I0128 13:09:54.579831 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"03e938c1-a61a-4c60-9d8e-660cefebc2fc","Type":"ContainerStarted","Data":"873ae08e80441d356f3f5d439298e7d733cdf8d97c0497d3e54c1b978aa92290"} Jan 28 13:09:55 crc kubenswrapper[4848]: I0128 13:09:55.627098 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.627066913 podStartE2EDuration="4.627066913s" podCreationTimestamp="2026-01-28 13:09:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:09:55.616207344 +0000 UTC m=+1422.528424382" watchObservedRunningTime="2026-01-28 13:09:55.627066913 +0000 UTC m=+1422.539283951" Jan 28 13:09:57 crc kubenswrapper[4848]: I0128 13:09:57.618953 4848 generic.go:334] "Generic (PLEG): container finished" 
podID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerID="34d88ca7447387fa85383c08aa3b97091dadd666344f329ef1871c43238d4a95" exitCode=0 Jan 28 13:09:57 crc kubenswrapper[4848]: I0128 13:09:57.619044 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerDied","Data":"34d88ca7447387fa85383c08aa3b97091dadd666344f329ef1871c43238d4a95"} Jan 28 13:09:58 crc kubenswrapper[4848]: E0128 13:09:58.443461 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-conmon-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:09:58 crc kubenswrapper[4848]: I0128 13:09:58.903583 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 13:09:58 crc kubenswrapper[4848]: I0128 13:09:58.903668 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 13:09:58 crc kubenswrapper[4848]: I0128 13:09:58.938990 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 13:09:58 crc kubenswrapper[4848]: I0128 13:09:58.949768 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 13:09:59 crc kubenswrapper[4848]: I0128 13:09:59.644195 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 13:09:59 crc kubenswrapper[4848]: I0128 13:09:59.644835 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 13:10:01 crc kubenswrapper[4848]: I0128 13:10:01.675891 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 13:10:01 crc kubenswrapper[4848]: I0128 13:10:01.677059 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:10:01 crc kubenswrapper[4848]: I0128 13:10:01.865021 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.281690 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.283091 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.327436 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.333178 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.679669 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.680123 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.947226 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-22zhk"] Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.950626 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:02 crc kubenswrapper[4848]: I0128 13:10:02.966644 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-22zhk"] Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.010602 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4ngk\" (UniqueName: \"kubernetes.io/projected/a42d5045-d920-4f1c-a48c-8e600fef0173-kube-api-access-p4ngk\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.010801 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-utilities\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.010891 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-catalog-content\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.113262 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-utilities\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.113381 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-catalog-content\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.113412 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4ngk\" (UniqueName: \"kubernetes.io/projected/a42d5045-d920-4f1c-a48c-8e600fef0173-kube-api-access-p4ngk\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.114097 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-utilities\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc 
kubenswrapper[4848]: I0128 13:10:03.114354 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-catalog-content\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.139113 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4ngk\" (UniqueName: \"kubernetes.io/projected/a42d5045-d920-4f1c-a48c-8e600fef0173-kube-api-access-p4ngk\") pod \"redhat-operators-22zhk\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.290072 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:03 crc kubenswrapper[4848]: I0128 13:10:03.833957 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-22zhk"] Jan 28 13:10:03 crc kubenswrapper[4848]: W0128 13:10:03.858005 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda42d5045_d920_4f1c_a48c_8e600fef0173.slice/crio-73a98ac737e6efaa20512ae59ed6887d4385903f0d9db0e821eecad3ef7c07d7 WatchSource:0}: Error finding container 73a98ac737e6efaa20512ae59ed6887d4385903f0d9db0e821eecad3ef7c07d7: Status 404 returned error can't find the container with id 73a98ac737e6efaa20512ae59ed6887d4385903f0d9db0e821eecad3ef7c07d7 Jan 28 13:10:04 crc kubenswrapper[4848]: I0128 13:10:04.698931 4848 generic.go:334] "Generic (PLEG): container finished" podID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerID="49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14" exitCode=0 Jan 28 13:10:04 crc kubenswrapper[4848]: I0128 13:10:04.699042 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerDied","Data":"49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14"} Jan 28 13:10:04 crc kubenswrapper[4848]: I0128 13:10:04.699455 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:10:04 crc kubenswrapper[4848]: I0128 13:10:04.699472 4848 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 13:10:04 crc kubenswrapper[4848]: I0128 13:10:04.699460 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerStarted","Data":"73a98ac737e6efaa20512ae59ed6887d4385903f0d9db0e821eecad3ef7c07d7"} Jan 28 13:10:05 crc kubenswrapper[4848]: I0128 13:10:05.259473 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:05 crc kubenswrapper[4848]: I0128 13:10:05.277041 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 13:10:06 crc kubenswrapper[4848]: I0128 13:10:06.755614 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerStarted","Data":"e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e"} Jan 28 13:10:07 crc 
kubenswrapper[4848]: I0128 13:10:07.767107 4848 generic.go:334] "Generic (PLEG): container finished" podID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerID="e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e" exitCode=0 Jan 28 13:10:07 crc kubenswrapper[4848]: I0128 13:10:07.767221 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerDied","Data":"e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e"} Jan 28 13:10:07 crc kubenswrapper[4848]: I0128 13:10:07.767601 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerStarted","Data":"b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83"} Jan 28 13:10:07 crc kubenswrapper[4848]: I0128 13:10:07.795426 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-22zhk" podStartSLOduration=3.2414243369999998 podStartE2EDuration="5.795400352s" podCreationTimestamp="2026-01-28 13:10:02 +0000 UTC" firstStartedPulling="2026-01-28 13:10:04.701978257 +0000 UTC m=+1431.614195295" lastFinishedPulling="2026-01-28 13:10:07.255954262 +0000 UTC m=+1434.168171310" observedRunningTime="2026-01-28 13:10:07.78591438 +0000 UTC m=+1434.698131438" watchObservedRunningTime="2026-01-28 13:10:07.795400352 +0000 UTC m=+1434.707617390" Jan 28 13:10:07 crc kubenswrapper[4848]: I0128 13:10:07.924780 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:10:07 crc kubenswrapper[4848]: I0128 13:10:07.924864 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:10:08 crc kubenswrapper[4848]: E0128 13:10:08.704842 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-conmon-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d1240ae_2011_41cb_90a2_6f050020e305.slice/crio-1d25688c4e3d48c2f5ab877b7ec210b8d44081a07203cec0c02a24ff1114e8c2.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:10:09 crc kubenswrapper[4848]: I0128 13:10:09.730049 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 13:10:13 crc kubenswrapper[4848]: I0128 13:10:13.290407 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:13 crc kubenswrapper[4848]: I0128 13:10:13.291219 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:13 crc kubenswrapper[4848]: I0128 13:10:13.835139 4848 generic.go:334] "Generic (PLEG): container finished" podID="c1ace158-4e32-4a9a-b350-4afddceb574c" containerID="7ac8c112211d7b6ad12d0f8daf1a5d6983909337df04ebfa23aafb9bb60e5ed1" exitCode=0 Jan 28 13:10:13 crc kubenswrapper[4848]: I0128 13:10:13.835190 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" event={"ID":"c1ace158-4e32-4a9a-b350-4afddceb574c","Type":"ContainerDied","Data":"7ac8c112211d7b6ad12d0f8daf1a5d6983909337df04ebfa23aafb9bb60e5ed1"} Jan 28 13:10:14 crc kubenswrapper[4848]: I0128 13:10:14.349496 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-22zhk" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="registry-server" probeResult="failure" output=< Jan 28 13:10:14 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 13:10:14 crc kubenswrapper[4848]: > Jan 28 13:10:15 crc kubenswrapper[4848]: E0128 13:10:15.026841 4848 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/68248e08cd525324d9d3f21baac32d8ce3520a6f6cd406545b25ec21da7b12eb/diff" to get inode usage: stat /var/lib/containers/storage/overlay/68248e08cd525324d9d3f21baac32d8ce3520a6f6cd406545b25ec21da7b12eb/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_ceilometer-0_6d1240ae-2011-41cb-90a2-6f050020e305/ceilometer-central-agent/0.log" to get inode usage: stat /var/log/pods/openstack_ceilometer-0_6d1240ae-2011-41cb-90a2-6f050020e305/ceilometer-central-agent/0.log: no such file or directory Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.376918 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.469398 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-config-data\") pod \"c1ace158-4e32-4a9a-b350-4afddceb574c\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.469725 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-combined-ca-bundle\") pod \"c1ace158-4e32-4a9a-b350-4afddceb574c\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.469753 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6nsr\" (UniqueName: \"kubernetes.io/projected/c1ace158-4e32-4a9a-b350-4afddceb574c-kube-api-access-l6nsr\") pod \"c1ace158-4e32-4a9a-b350-4afddceb574c\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.469815 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-scripts\") pod \"c1ace158-4e32-4a9a-b350-4afddceb574c\" (UID: \"c1ace158-4e32-4a9a-b350-4afddceb574c\") " Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.477760 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-scripts" (OuterVolumeSpecName: "scripts") pod "c1ace158-4e32-4a9a-b350-4afddceb574c" (UID: "c1ace158-4e32-4a9a-b350-4afddceb574c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.477798 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1ace158-4e32-4a9a-b350-4afddceb574c-kube-api-access-l6nsr" (OuterVolumeSpecName: "kube-api-access-l6nsr") pod "c1ace158-4e32-4a9a-b350-4afddceb574c" (UID: "c1ace158-4e32-4a9a-b350-4afddceb574c"). InnerVolumeSpecName "kube-api-access-l6nsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.505107 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1ace158-4e32-4a9a-b350-4afddceb574c" (UID: "c1ace158-4e32-4a9a-b350-4afddceb574c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.507088 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-config-data" (OuterVolumeSpecName: "config-data") pod "c1ace158-4e32-4a9a-b350-4afddceb574c" (UID: "c1ace158-4e32-4a9a-b350-4afddceb574c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.573528 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.573582 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.573598 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ace158-4e32-4a9a-b350-4afddceb574c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.573612 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6nsr\" (UniqueName: \"kubernetes.io/projected/c1ace158-4e32-4a9a-b350-4afddceb574c-kube-api-access-l6nsr\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.905616 4848 generic.go:334] "Generic (PLEG): container finished" podID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerID="c4f6ae1377600fc12b1953698d273ef9327508721256a58a7f4f2a6d66948e14" exitCode=137 Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.906069 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerDied","Data":"c4f6ae1377600fc12b1953698d273ef9327508721256a58a7f4f2a6d66948e14"} Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.906108 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8c06e17-b640-4cd3-9574-11cbb37abd2a","Type":"ContainerDied","Data":"b533a1fc759b4adddd1d6b596921a1a40fba2b9d03dcdfa4dd74fd723defbb3c"} Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.906125 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b533a1fc759b4adddd1d6b596921a1a40fba2b9d03dcdfa4dd74fd723defbb3c" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.912942 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" event={"ID":"c1ace158-4e32-4a9a-b350-4afddceb574c","Type":"ContainerDied","Data":"00a501e6da790970f9ca2a24d34ee18b3785ab2aafeba462c5c09eb374c21c0e"} Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.913006 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00a501e6da790970f9ca2a24d34ee18b3785ab2aafeba462c5c09eb374c21c0e" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.913053 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8rd4v" Jan 28 13:10:15 crc kubenswrapper[4848]: I0128 13:10:15.953388 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.070898 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 13:10:16 crc kubenswrapper[4848]: E0128 13:10:16.071502 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-notification-agent" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071523 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-notification-agent" Jan 28 13:10:16 crc kubenswrapper[4848]: E0128 13:10:16.071558 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="proxy-httpd" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071566 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="proxy-httpd" Jan 28 13:10:16 crc kubenswrapper[4848]: E0128 13:10:16.071594 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="sg-core" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071601 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="sg-core" Jan 28 13:10:16 crc kubenswrapper[4848]: E0128 13:10:16.071628 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1ace158-4e32-4a9a-b350-4afddceb574c" containerName="nova-cell0-conductor-db-sync" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071634 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1ace158-4e32-4a9a-b350-4afddceb574c" containerName="nova-cell0-conductor-db-sync" Jan 28 13:10:16 crc kubenswrapper[4848]: E0128 13:10:16.071674 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-central-agent" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071681 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-central-agent" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071874 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-notification-agent" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071892 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1ace158-4e32-4a9a-b350-4afddceb574c" containerName="nova-cell0-conductor-db-sync" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071917 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="sg-core" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071928 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="proxy-httpd" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.071941 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" containerName="ceilometer-central-agent" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.072770 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.076049 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hbjw8" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.076455 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.085140 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.085809 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-sg-core-conf-yaml\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.085971 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-run-httpd\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.086040 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g54d2\" (UniqueName: \"kubernetes.io/projected/a8c06e17-b640-4cd3-9574-11cbb37abd2a-kube-api-access-g54d2\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.086119 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-log-httpd\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.086167 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-scripts\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.086197 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-config-data\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.086344 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-combined-ca-bundle\") pod \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\" (UID: \"a8c06e17-b640-4cd3-9574-11cbb37abd2a\") " Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.087035 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.087301 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.096849 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c06e17-b640-4cd3-9574-11cbb37abd2a-kube-api-access-g54d2" (OuterVolumeSpecName: "kube-api-access-g54d2") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "kube-api-access-g54d2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.098123 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-scripts" (OuterVolumeSpecName: "scripts") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.134369 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.188979 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84390cac-21ce-4f4f-98f8-a8371c1742cb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.189047 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5v5c\" (UniqueName: \"kubernetes.io/projected/84390cac-21ce-4f4f-98f8-a8371c1742cb-kube-api-access-p5v5c\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.189369 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84390cac-21ce-4f4f-98f8-a8371c1742cb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.189846 4848 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.189869 4848 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc 
kubenswrapper[4848]: I0128 13:10:16.189885 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g54d2\" (UniqueName: \"kubernetes.io/projected/a8c06e17-b640-4cd3-9574-11cbb37abd2a-kube-api-access-g54d2\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.189900 4848 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8c06e17-b640-4cd3-9574-11cbb37abd2a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.189910 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.193697 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.229006 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-config-data" (OuterVolumeSpecName: "config-data") pod "a8c06e17-b640-4cd3-9574-11cbb37abd2a" (UID: "a8c06e17-b640-4cd3-9574-11cbb37abd2a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.309008 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84390cac-21ce-4f4f-98f8-a8371c1742cb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.309226 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84390cac-21ce-4f4f-98f8-a8371c1742cb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.309267 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5v5c\" (UniqueName: \"kubernetes.io/projected/84390cac-21ce-4f4f-98f8-a8371c1742cb-kube-api-access-p5v5c\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.309395 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.309409 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8c06e17-b640-4cd3-9574-11cbb37abd2a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.313556 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/84390cac-21ce-4f4f-98f8-a8371c1742cb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.314592 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84390cac-21ce-4f4f-98f8-a8371c1742cb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.340110 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5v5c\" (UniqueName: \"kubernetes.io/projected/84390cac-21ce-4f4f-98f8-a8371c1742cb-kube-api-access-p5v5c\") pod \"nova-cell0-conductor-0\" (UID: \"84390cac-21ce-4f4f-98f8-a8371c1742cb\") " pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.402240 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.908862 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.923429 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.923542 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"84390cac-21ce-4f4f-98f8-a8371c1742cb","Type":"ContainerStarted","Data":"634971b2215403bc40c14c0187eb3575e97983fb2000b3ac9c542ee63910d3bf"} Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.957436 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.972675 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.993910 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:10:16 crc kubenswrapper[4848]: I0128 13:10:16.997460 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.004007 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.004402 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.007454 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.127058 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-run-httpd\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.127120 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-scripts\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.127145 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-config-data\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.127174 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.127678 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.127748 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-log-httpd\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.128069 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-827ts\" (UniqueName: \"kubernetes.io/projected/9e026660-5464-42b9-876f-0d28b39f28fe-kube-api-access-827ts\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.231364 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-run-httpd\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.231793 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-scripts\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.231832 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-config-data\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.231868 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.231954 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.231980 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-log-httpd\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.232064 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-827ts\" (UniqueName: \"kubernetes.io/projected/9e026660-5464-42b9-876f-0d28b39f28fe-kube-api-access-827ts\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.235414 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-log-httpd\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.235415 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-run-httpd\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.239930 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-scripts\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.249095 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.278129 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.279148 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-config-data\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.361094 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-827ts\" (UniqueName: \"kubernetes.io/projected/9e026660-5464-42b9-876f-0d28b39f28fe-kube-api-access-827ts\") pod \"ceilometer-0\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.627476 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:10:17 crc kubenswrapper[4848]: I0128 13:10:17.934481 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"84390cac-21ce-4f4f-98f8-a8371c1742cb","Type":"ContainerStarted","Data":"6443557645c3e152435accc244ee2d76d7ffdf817da1d19fa01ccfff10c6dcee"} Jan 28 13:10:18 crc kubenswrapper[4848]: I0128 13:10:18.393305 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:10:18 crc kubenswrapper[4848]: I0128 13:10:18.866228 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c06e17-b640-4cd3-9574-11cbb37abd2a" path="/var/lib/kubelet/pods/a8c06e17-b640-4cd3-9574-11cbb37abd2a/volumes" Jan 28 13:10:18 crc kubenswrapper[4848]: I0128 13:10:18.970508 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerStarted","Data":"204f3b263bc8aa130403f00a8758ca4664166abddd147dfc8151611f4e69a4dd"} Jan 28 13:10:18 crc kubenswrapper[4848]: I0128 13:10:18.970567 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerStarted","Data":"956755e45932ced0387c19e5089628ed7c83f0fb10bc8556715ced758cef9567"} Jan 28 13:10:18 crc kubenswrapper[4848]: I0128 13:10:18.970608 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:18 crc kubenswrapper[4848]: I0128 13:10:18.995260 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.9952163 podStartE2EDuration="2.9952163s" podCreationTimestamp="2026-01-28 13:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:18.992852205 +0000 UTC m=+1445.905069243" watchObservedRunningTime="2026-01-28 13:10:18.9952163 +0000 UTC m=+1445.907433398" Jan 28 13:10:19 crc kubenswrapper[4848]: I0128 13:10:19.987791 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerStarted","Data":"87c6339dbf0b935c2b4718a1163faa86e98cf90bced8282e8c1f459b3f216529"} Jan 28 13:10:21 crc kubenswrapper[4848]: I0128 13:10:21.008417 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerStarted","Data":"d70d3131bb5423997c9a4af376dcea61493d0c82157d560a0144fe5638cd2220"} Jan 28 13:10:22 crc kubenswrapper[4848]: I0128 13:10:22.025446 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerStarted","Data":"b258cfdb1e9f77810684d68ef1b20418e2f7fc94b2baa31b045707b282dee60f"} Jan 28 13:10:22 crc kubenswrapper[4848]: I0128 13:10:22.025969 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 13:10:23 crc kubenswrapper[4848]: I0128 13:10:23.361311 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:23 crc kubenswrapper[4848]: I0128 13:10:23.386198 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.173221087 podStartE2EDuration="7.386175963s" podCreationTimestamp="2026-01-28 13:10:16 +0000 UTC" firstStartedPulling="2026-01-28 13:10:18.410888534 +0000 UTC m=+1445.323105572" lastFinishedPulling="2026-01-28 13:10:21.62384341 +0000 UTC m=+1448.536060448" observedRunningTime="2026-01-28 13:10:22.058127225 +0000 UTC m=+1448.970344283" watchObservedRunningTime="2026-01-28 13:10:23.386175963 +0000 UTC m=+1450.298393001" Jan 28 13:10:23 crc kubenswrapper[4848]: I0128 13:10:23.428566 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:23 crc kubenswrapper[4848]: I0128 13:10:23.621026 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-22zhk"] Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.062669 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-22zhk" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="registry-server" containerID="cri-o://b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83" gracePeriod=2 Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.748923 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.831921 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4ngk\" (UniqueName: \"kubernetes.io/projected/a42d5045-d920-4f1c-a48c-8e600fef0173-kube-api-access-p4ngk\") pod \"a42d5045-d920-4f1c-a48c-8e600fef0173\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.832044 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-catalog-content\") pod \"a42d5045-d920-4f1c-a48c-8e600fef0173\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.832129 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-utilities\") pod \"a42d5045-d920-4f1c-a48c-8e600fef0173\" (UID: \"a42d5045-d920-4f1c-a48c-8e600fef0173\") " Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.833788 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-utilities" (OuterVolumeSpecName: "utilities") pod "a42d5045-d920-4f1c-a48c-8e600fef0173" (UID: "a42d5045-d920-4f1c-a48c-8e600fef0173"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.853749 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a42d5045-d920-4f1c-a48c-8e600fef0173-kube-api-access-p4ngk" (OuterVolumeSpecName: "kube-api-access-p4ngk") pod "a42d5045-d920-4f1c-a48c-8e600fef0173" (UID: "a42d5045-d920-4f1c-a48c-8e600fef0173"). InnerVolumeSpecName "kube-api-access-p4ngk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.936524 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4ngk\" (UniqueName: \"kubernetes.io/projected/a42d5045-d920-4f1c-a48c-8e600fef0173-kube-api-access-p4ngk\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.936675 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:25 crc kubenswrapper[4848]: I0128 13:10:25.950397 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a42d5045-d920-4f1c-a48c-8e600fef0173" (UID: "a42d5045-d920-4f1c-a48c-8e600fef0173"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.038268 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42d5045-d920-4f1c-a48c-8e600fef0173-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.075127 4848 generic.go:334] "Generic (PLEG): container finished" podID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerID="b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83" exitCode=0 Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.075178 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerDied","Data":"b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83"} Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.075211 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-22zhk" event={"ID":"a42d5045-d920-4f1c-a48c-8e600fef0173","Type":"ContainerDied","Data":"73a98ac737e6efaa20512ae59ed6887d4385903f0d9db0e821eecad3ef7c07d7"} Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.075230 4848 scope.go:117] "RemoveContainer" containerID="b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.075399 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-22zhk" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.110819 4848 scope.go:117] "RemoveContainer" containerID="e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.126625 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-22zhk"] Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.133612 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-22zhk"] Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.139286 4848 scope.go:117] "RemoveContainer" containerID="49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.181716 4848 scope.go:117] "RemoveContainer" containerID="b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83" Jan 28 13:10:26 crc kubenswrapper[4848]: E0128 13:10:26.182217 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83\": container with ID starting with b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83 not found: ID does not exist" containerID="b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.182290 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83"} err="failed to get container status \"b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83\": rpc error: code = NotFound desc = could not find container \"b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83\": container with ID starting with b57c61381b375cf1f5afb91e8249ea955c15a0aa382a2fd3de7ea82d1aa10b83 not found: ID does not exist" Jan 28 13:10:26 crc 
kubenswrapper[4848]: I0128 13:10:26.182322 4848 scope.go:117] "RemoveContainer" containerID="e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e" Jan 28 13:10:26 crc kubenswrapper[4848]: E0128 13:10:26.182580 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e\": container with ID starting with e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e not found: ID does not exist" containerID="e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.182604 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e"} err="failed to get container status \"e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e\": rpc error: code = NotFound desc = could not find container \"e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e\": container with ID starting with e8131b5b334823881c077df8f1a58ce1ed5c74cc0cc872b0e555bc67926cec4e not found: ID does not exist" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.182621 4848 scope.go:117] "RemoveContainer" containerID="49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14" Jan 28 13:10:26 crc kubenswrapper[4848]: E0128 13:10:26.182835 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14\": container with ID starting with 49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14 not found: ID does not exist" containerID="49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.182856 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14"} err="failed to get container status \"49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14\": rpc error: code = NotFound desc = could not find container \"49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14\": container with ID starting with 49522eb0c52d9a1a2cd1662f5d628c43b098bb99de77060da58eebe5ded21d14 not found: ID does not exist" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.432596 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 28 13:10:26 crc kubenswrapper[4848]: I0128 13:10:26.864822 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" path="/var/lib/kubelet/pods/a42d5045-d920-4f1c-a48c-8e600fef0173/volumes" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.063704 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-xqjf4"] Jan 28 13:10:27 crc kubenswrapper[4848]: E0128 13:10:27.065003 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="extract-content" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.065129 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="extract-content" Jan 28 13:10:27 crc kubenswrapper[4848]: E0128 13:10:27.065224 4848 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="extract-utilities" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.065318 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="extract-utilities" Jan 28 13:10:27 crc kubenswrapper[4848]: E0128 13:10:27.065395 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="registry-server" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.065463 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="registry-server" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.065750 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a42d5045-d920-4f1c-a48c-8e600fef0173" containerName="registry-server" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.066648 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.073480 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.074034 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.077074 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-xqjf4"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.164130 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.164697 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-config-data\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.164766 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-scripts\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.165300 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2hpk\" (UniqueName: \"kubernetes.io/projected/77d21746-7f11-4c88-9433-8672991fe2e3-kube-api-access-m2hpk\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.268171 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-config-data\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " 
pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.268583 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-scripts\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.268808 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2hpk\" (UniqueName: \"kubernetes.io/projected/77d21746-7f11-4c88-9433-8672991fe2e3-kube-api-access-m2hpk\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.269037 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.273998 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-scripts\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.274585 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.305584 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-config-data\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.321140 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2hpk\" (UniqueName: \"kubernetes.io/projected/77d21746-7f11-4c88-9433-8672991fe2e3-kube-api-access-m2hpk\") pod \"nova-cell0-cell-mapping-xqjf4\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") " pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.335307 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.338826 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.341834 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.384187 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.437339 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xqjf4" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.495742 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-config-data\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.495810 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a879d23c-f2e2-47d8-929a-795e3bb19442-logs\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.495840 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.495906 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vcgs\" (UniqueName: \"kubernetes.io/projected/a879d23c-f2e2-47d8-929a-795e3bb19442-kube-api-access-6vcgs\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.546045 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.549793 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.579907 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.587198 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.590409 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.602070 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-config-data\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.602137 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a879d23c-f2e2-47d8-929a-795e3bb19442-logs\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.602174 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.602264 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vcgs\" (UniqueName: \"kubernetes.io/projected/a879d23c-f2e2-47d8-929a-795e3bb19442-kube-api-access-6vcgs\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.602979 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.611434 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a879d23c-f2e2-47d8-929a-795e3bb19442-logs\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.622651 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.627029 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-config-data\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.640717 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.670403 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.711389 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vcgs\" (UniqueName: \"kubernetes.io/projected/a879d23c-f2e2-47d8-929a-795e3bb19442-kube-api-access-6vcgs\") pod \"nova-api-0\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.711711 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mq2vk\" (UniqueName: 
\"kubernetes.io/projected/356f4d06-990d-4f97-be70-555f01573201-kube-api-access-mq2vk\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.712240 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c1d23b5-4c91-4195-8290-4e7ae032bac9-logs\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.712417 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-config-data\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.712536 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-config-data\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.712667 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.712805 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62jsx\" (UniqueName: \"kubernetes.io/projected/0c1d23b5-4c91-4195-8290-4e7ae032bac9-kube-api-access-62jsx\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.712925 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.736357 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.738284 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.746651 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.792527 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819722 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c1d23b5-4c91-4195-8290-4e7ae032bac9-logs\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819790 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-config-data\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819820 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-config-data\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819846 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819883 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62jsx\" (UniqueName: \"kubernetes.io/projected/0c1d23b5-4c91-4195-8290-4e7ae032bac9-kube-api-access-62jsx\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819904 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.819940 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mq2vk\" (UniqueName: \"kubernetes.io/projected/356f4d06-990d-4f97-be70-555f01573201-kube-api-access-mq2vk\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.820852 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c1d23b5-4c91-4195-8290-4e7ae032bac9-logs\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.829156 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.830430 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-config-data\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.858791 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.859338 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-config-data\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.863432 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62jsx\" (UniqueName: \"kubernetes.io/projected/0c1d23b5-4c91-4195-8290-4e7ae032bac9-kube-api-access-62jsx\") pod \"nova-metadata-0\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") " pod="openstack/nova-metadata-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.896765 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mq2vk\" (UniqueName: \"kubernetes.io/projected/356f4d06-990d-4f97-be70-555f01573201-kube-api-access-mq2vk\") pod \"nova-scheduler-0\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.938011 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.938852 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.939130 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fd8j\" (UniqueName: \"kubernetes.io/projected/7c2944fc-8fcf-499b-8e98-8e026c1065d2-kube-api-access-2fd8j\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:27 crc kubenswrapper[4848]: I0128 13:10:27.970344 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.004295 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.022135 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.051129 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.051233 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.064959 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fd8j\" (UniqueName: \"kubernetes.io/projected/7c2944fc-8fcf-499b-8e98-8e026c1065d2-kube-api-access-2fd8j\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.116079 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.123271 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.123364 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54bdc65d45-fctrd"] Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.125802 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.129023 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fd8j\" (UniqueName: \"kubernetes.io/projected/7c2944fc-8fcf-499b-8e98-8e026c1065d2-kube-api-access-2fd8j\") pod \"nova-cell1-novncproxy-0\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.252352 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54bdc65d45-fctrd"] Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.388320 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.413876 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-svc\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.413933 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlbfs\" (UniqueName: \"kubernetes.io/projected/86b85bd6-6e73-4373-b9bc-faea340b9e24-kube-api-access-rlbfs\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.413975 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-sb\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.414007 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-swift-storage-0\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.414069 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-config\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.414178 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-nb\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.534815 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-svc\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.537971 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-svc\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.535359 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlbfs\" (UniqueName: \"kubernetes.io/projected/86b85bd6-6e73-4373-b9bc-faea340b9e24-kube-api-access-rlbfs\") pod 
\"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.545616 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-sb\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.545671 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-swift-storage-0\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.545814 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-config\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.546551 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-nb\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.547784 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-nb\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.551235 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-swift-storage-0\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.561059 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-config\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.562001 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-sb\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.577545 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlbfs\" (UniqueName: \"kubernetes.io/projected/86b85bd6-6e73-4373-b9bc-faea340b9e24-kube-api-access-rlbfs\") pod \"dnsmasq-dns-54bdc65d45-fctrd\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " 
pod="openstack/dnsmasq-dns-54bdc65d45-fctrd"
Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.593115 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-xqjf4"]
Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.789667 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd"
Jan 28 13:10:28 crc kubenswrapper[4848]: I0128 13:10:28.840629 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:10:28 crc kubenswrapper[4848]: W0128 13:10:28.897594 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda879d23c_f2e2_47d8_929a_795e3bb19442.slice/crio-1bc752b839da21a43bd6becba74185d28bf9abfda07f6af5ade816210af5d1a2 WatchSource:0}: Error finding container 1bc752b839da21a43bd6becba74185d28bf9abfda07f6af5ade816210af5d1a2: Status 404 returned error can't find the container with id 1bc752b839da21a43bd6becba74185d28bf9abfda07f6af5ade816210af5d1a2
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.141045 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:29 crc kubenswrapper[4848]: W0128 13:10:29.157053 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c1d23b5_4c91_4195_8290_4e7ae032bac9.slice/crio-abbb9d5014ee47d91b403a5cd17bdb50c3421418b328f5ad8e3732aa537c0485 WatchSource:0}: Error finding container abbb9d5014ee47d91b403a5cd17bdb50c3421418b328f5ad8e3732aa537c0485: Status 404 returned error can't find the container with id abbb9d5014ee47d91b403a5cd17bdb50c3421418b328f5ad8e3732aa537c0485
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.165571 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a879d23c-f2e2-47d8-929a-795e3bb19442","Type":"ContainerStarted","Data":"1bc752b839da21a43bd6becba74185d28bf9abfda07f6af5ade816210af5d1a2"}
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.182738 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xqjf4" event={"ID":"77d21746-7f11-4c88-9433-8672991fe2e3","Type":"ContainerStarted","Data":"c23039557b3d2d5dddf6c6ed44be31cfe71c0d206b50bbab5afa2fda6fee2408"}
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.183138 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xqjf4" event={"ID":"77d21746-7f11-4c88-9433-8672991fe2e3","Type":"ContainerStarted","Data":"7c8b882be1f86673e1291f63c41db757bea18062eed0469909782c842a995bc1"}
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.193784 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.206089 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-xqjf4" podStartSLOduration=2.206061813 podStartE2EDuration="2.206061813s" podCreationTimestamp="2026-01-28 13:10:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:29.203699428 +0000 UTC m=+1456.115916466" watchObservedRunningTime="2026-01-28 13:10:29.206061813 +0000 UTC m=+1456.118278851"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.238927 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bglkq"]
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.253845 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.268559 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.268752 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.283101 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bglkq"]
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.303883 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.304142 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz8hw\" (UniqueName: \"kubernetes.io/projected/ba850830-ca3a-43c2-8639-bdf9386d7f9b-kube-api-access-qz8hw\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.304495 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-config-data\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.304852 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-scripts\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.385203 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.405292 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54bdc65d45-fctrd"]
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.406832 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.406938 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz8hw\" (UniqueName: \"kubernetes.io/projected/ba850830-ca3a-43c2-8639-bdf9386d7f9b-kube-api-access-qz8hw\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.407011 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-config-data\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.407103 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-scripts\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.419791 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.429946 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-scripts\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.431639 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz8hw\" (UniqueName: \"kubernetes.io/projected/ba850830-ca3a-43c2-8639-bdf9386d7f9b-kube-api-access-qz8hw\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.443054 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-config-data\") pod \"nova-cell1-conductor-db-sync-bglkq\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:29 crc kubenswrapper[4848]: I0128 13:10:29.699664 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bglkq"
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.197950 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c1d23b5-4c91-4195-8290-4e7ae032bac9","Type":"ContainerStarted","Data":"abbb9d5014ee47d91b403a5cd17bdb50c3421418b328f5ad8e3732aa537c0485"}
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.202976 4848 generic.go:334] "Generic (PLEG): container finished" podID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerID="8550601ae5964770372b7e5e5b357451d9b54d55944984d2a9ae3baef64b05f5" exitCode=0
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.203091 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" event={"ID":"86b85bd6-6e73-4373-b9bc-faea340b9e24","Type":"ContainerDied","Data":"8550601ae5964770372b7e5e5b357451d9b54d55944984d2a9ae3baef64b05f5"}
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.203175 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" event={"ID":"86b85bd6-6e73-4373-b9bc-faea340b9e24","Type":"ContainerStarted","Data":"43c04236108e02f321921b522e2b9fc752cecdfb178e794d01c48b16b1812f35"}
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.205268 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"356f4d06-990d-4f97-be70-555f01573201","Type":"ContainerStarted","Data":"25314d38050d5f86a0f78faccfda8f970b90e03e1822b3e3b6331c5824bd1ad4"}
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.215131 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7c2944fc-8fcf-499b-8e98-8e026c1065d2","Type":"ContainerStarted","Data":"72abe7f680e351a52d891913e5e495b6af61108a4857bda28c4ef91922c64a50"}
Jan 28 13:10:30 crc kubenswrapper[4848]: I0128 13:10:30.365729 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bglkq"]
Jan 28 13:10:30 crc kubenswrapper[4848]: W0128 13:10:30.451529 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba850830_ca3a_43c2_8639_bdf9386d7f9b.slice/crio-1292a956f3002321b646a1b6b8c14b3a1bcfb19951a21e327119018adb346d67 WatchSource:0}: Error finding container 1292a956f3002321b646a1b6b8c14b3a1bcfb19951a21e327119018adb346d67: Status 404 returned error can't find the container with id 1292a956f3002321b646a1b6b8c14b3a1bcfb19951a21e327119018adb346d67
Jan 28 13:10:31 crc kubenswrapper[4848]: I0128 13:10:31.228953 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" event={"ID":"86b85bd6-6e73-4373-b9bc-faea340b9e24","Type":"ContainerStarted","Data":"37e2d30577d02770a0a1e5f2df69b6c17b1b47617f5053830f24d6d5aa7c0241"}
Jan 28 13:10:31 crc kubenswrapper[4848]: I0128 13:10:31.229725 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd"
Jan 28 13:10:31 crc kubenswrapper[4848]: I0128 13:10:31.230717 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bglkq" event={"ID":"ba850830-ca3a-43c2-8639-bdf9386d7f9b","Type":"ContainerStarted","Data":"1292a956f3002321b646a1b6b8c14b3a1bcfb19951a21e327119018adb346d67"}
Jan 28 13:10:31 crc kubenswrapper[4848]: I0128 13:10:31.259227 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" podStartSLOduration=4.259204452 podStartE2EDuration="4.259204452s" podCreationTimestamp="2026-01-28 13:10:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:31.251447418 +0000 UTC m=+1458.163664466" watchObservedRunningTime="2026-01-28 13:10:31.259204452 +0000 UTC m=+1458.171421490"
Jan 28 13:10:31 crc kubenswrapper[4848]: I0128 13:10:31.907659 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:31 crc kubenswrapper[4848]: I0128 13:10:31.929058 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 28 13:10:33 crc kubenswrapper[4848]: I0128 13:10:33.261131 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bglkq" event={"ID":"ba850830-ca3a-43c2-8639-bdf9386d7f9b","Type":"ContainerStarted","Data":"ed772e9f95c32c4ce555ea9cb08e7cad878f5377ffb4f74863f0cf0ea90c01d2"}
Jan 28 13:10:33 crc kubenswrapper[4848]: I0128 13:10:33.299445 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-bglkq" podStartSLOduration=4.299403994 podStartE2EDuration="4.299403994s" podCreationTimestamp="2026-01-28 13:10:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:33.280713579 +0000 UTC m=+1460.192930617" watchObservedRunningTime="2026-01-28 13:10:33.299403994 +0000 UTC m=+1460.211621032"
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.272895 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7c2944fc-8fcf-499b-8e98-8e026c1065d2","Type":"ContainerStarted","Data":"fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75"}
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.273504 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="7c2944fc-8fcf-499b-8e98-8e026c1065d2" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75" gracePeriod=30
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.286760 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c1d23b5-4c91-4195-8290-4e7ae032bac9","Type":"ContainerStarted","Data":"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"}
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.286836 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c1d23b5-4c91-4195-8290-4e7ae032bac9","Type":"ContainerStarted","Data":"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"}
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.287002 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-log" containerID="cri-o://c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da" gracePeriod=30
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.287161 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-metadata" containerID="cri-o://52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85" gracePeriod=30
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.297805 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a879d23c-f2e2-47d8-929a-795e3bb19442","Type":"ContainerStarted","Data":"2ddcc04163e8f8d4871d4685d341406c55f1135d592bc18a4b47a697d6b4df07"}
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.297863 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a879d23c-f2e2-47d8-929a-795e3bb19442","Type":"ContainerStarted","Data":"bf04a1e385133343755486277220915ed4cb1e5020169ba85219ebeaf4a7ad8e"}
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.303270 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"356f4d06-990d-4f97-be70-555f01573201","Type":"ContainerStarted","Data":"317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6"}
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.310090 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.5837093749999998 podStartE2EDuration="7.310054205s" podCreationTimestamp="2026-01-28 13:10:27 +0000 UTC" firstStartedPulling="2026-01-28 13:10:29.42649927 +0000 UTC m=+1456.338716308" lastFinishedPulling="2026-01-28 13:10:33.1528441 +0000 UTC m=+1460.065061138" observedRunningTime="2026-01-28 13:10:34.303969348 +0000 UTC m=+1461.216186386" watchObservedRunningTime="2026-01-28 13:10:34.310054205 +0000 UTC m=+1461.222271233"
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.375922 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.135656462 podStartE2EDuration="7.375889967s" podCreationTimestamp="2026-01-28 13:10:27 +0000 UTC" firstStartedPulling="2026-01-28 13:10:28.927417352 +0000 UTC m=+1455.839634390" lastFinishedPulling="2026-01-28 13:10:33.167650857 +0000 UTC m=+1460.079867895" observedRunningTime="2026-01-28 13:10:34.337959974 +0000 UTC m=+1461.250177032" watchObservedRunningTime="2026-01-28 13:10:34.375889967 +0000 UTC m=+1461.288107005"
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.384190 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.411493086 podStartE2EDuration="7.384168845s" podCreationTimestamp="2026-01-28 13:10:27 +0000 UTC" firstStartedPulling="2026-01-28 13:10:29.178773692 +0000 UTC m=+1456.090990740" lastFinishedPulling="2026-01-28 13:10:33.151449441 +0000 UTC m=+1460.063666499" observedRunningTime="2026-01-28 13:10:34.360984117 +0000 UTC m=+1461.273201155" watchObservedRunningTime="2026-01-28 13:10:34.384168845 +0000 UTC m=+1461.296385873"
Jan 28 13:10:34 crc kubenswrapper[4848]: I0128 13:10:34.425046 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.432600637 podStartE2EDuration="7.42501567s" podCreationTimestamp="2026-01-28 13:10:27 +0000 UTC" firstStartedPulling="2026-01-28 13:10:29.160527769 +0000 UTC m=+1456.072744807" lastFinishedPulling="2026-01-28 13:10:33.152942802 +0000 UTC m=+1460.065159840" observedRunningTime="2026-01-28 13:10:34.410592873 +0000 UTC m=+1461.322809931" watchObservedRunningTime="2026-01-28 13:10:34.42501567 +0000 UTC m=+1461.337232718"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.277928 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.326122 4848 generic.go:334] "Generic (PLEG): container finished" podID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerID="52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85" exitCode=0
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.326159 4848 generic.go:334] "Generic (PLEG): container finished" podID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerID="c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da" exitCode=143
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.327359 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.327919 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c1d23b5-4c91-4195-8290-4e7ae032bac9","Type":"ContainerDied","Data":"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"}
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.327955 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c1d23b5-4c91-4195-8290-4e7ae032bac9","Type":"ContainerDied","Data":"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"}
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.327970 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c1d23b5-4c91-4195-8290-4e7ae032bac9","Type":"ContainerDied","Data":"abbb9d5014ee47d91b403a5cd17bdb50c3421418b328f5ad8e3732aa537c0485"}
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.327986 4848 scope.go:117] "RemoveContainer" containerID="52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.329559 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-combined-ca-bundle\") pod \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") "
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.329627 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-config-data\") pod \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") "
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.329708 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c1d23b5-4c91-4195-8290-4e7ae032bac9-logs\") pod \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") "
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.329752 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62jsx\" (UniqueName: \"kubernetes.io/projected/0c1d23b5-4c91-4195-8290-4e7ae032bac9-kube-api-access-62jsx\") pod \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\" (UID: \"0c1d23b5-4c91-4195-8290-4e7ae032bac9\") "
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.332170 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c1d23b5-4c91-4195-8290-4e7ae032bac9-logs" (OuterVolumeSpecName: "logs") pod "0c1d23b5-4c91-4195-8290-4e7ae032bac9" (UID: "0c1d23b5-4c91-4195-8290-4e7ae032bac9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.347411 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c1d23b5-4c91-4195-8290-4e7ae032bac9-kube-api-access-62jsx" (OuterVolumeSpecName: "kube-api-access-62jsx") pod "0c1d23b5-4c91-4195-8290-4e7ae032bac9" (UID: "0c1d23b5-4c91-4195-8290-4e7ae032bac9"). InnerVolumeSpecName "kube-api-access-62jsx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.390146 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-config-data" (OuterVolumeSpecName: "config-data") pod "0c1d23b5-4c91-4195-8290-4e7ae032bac9" (UID: "0c1d23b5-4c91-4195-8290-4e7ae032bac9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.394303 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c1d23b5-4c91-4195-8290-4e7ae032bac9" (UID: "0c1d23b5-4c91-4195-8290-4e7ae032bac9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.434045 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.434090 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1d23b5-4c91-4195-8290-4e7ae032bac9-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.434103 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c1d23b5-4c91-4195-8290-4e7ae032bac9-logs\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.434113 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62jsx\" (UniqueName: \"kubernetes.io/projected/0c1d23b5-4c91-4195-8290-4e7ae032bac9-kube-api-access-62jsx\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.491788 4848 scope.go:117] "RemoveContainer" containerID="c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.514174 4848 scope.go:117] "RemoveContainer" containerID="52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"
Jan 28 13:10:35 crc kubenswrapper[4848]: E0128 13:10:35.514796 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85\": container with ID starting with 52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85 not found: ID does not exist" containerID="52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.514922 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"} err="failed to get container status \"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85\": rpc error: code = NotFound desc = could not find container \"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85\": container with ID starting with 52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85 not found: ID does not exist"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.514959 4848 scope.go:117] "RemoveContainer" containerID="c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"
Jan 28 13:10:35 crc kubenswrapper[4848]: E0128 13:10:35.515583 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da\": container with ID starting with c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da not found: ID does not exist" containerID="c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.515608 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"} err="failed to get container status \"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da\": rpc error: code = NotFound desc = could not find container \"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da\": container with ID starting with c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da not found: ID does not exist"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.515626 4848 scope.go:117] "RemoveContainer" containerID="52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.515936 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85"} err="failed to get container status \"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85\": rpc error: code = NotFound desc = could not find container \"52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85\": container with ID starting with 52028865fa6480e22137ff3900bf68d6baec56d946c8ae41aac130da1d269c85 not found: ID does not exist"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.515959 4848 scope.go:117] "RemoveContainer" containerID="c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.516211 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da"} err="failed to get container status \"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da\": rpc error: code = NotFound desc = could not find container \"c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da\": container with ID starting with c0f2fab1a2b620fbdbb049532ffe9e1cd552ac6a553d3fd54e967c9b9745b3da not found: ID does not exist"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.679614 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.708376 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.732371 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:35 crc kubenswrapper[4848]: E0128 13:10:35.733120 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-metadata"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.733146 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-metadata"
Jan 28 13:10:35 crc kubenswrapper[4848]: E0128 13:10:35.733194 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-log"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.733205 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-log"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.733684 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-metadata"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.733723 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" containerName="nova-metadata-log"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.736099 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.743112 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.743336 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.747636 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.843599 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-logs\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.844127 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.844266 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjshx\" (UniqueName: \"kubernetes.io/projected/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-kube-api-access-kjshx\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.844397 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-config-data\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.844536 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.947306 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.947647 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-logs\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.947702 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.947732 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjshx\" (UniqueName: \"kubernetes.io/projected/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-kube-api-access-kjshx\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.947784 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-config-data\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.949068 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-logs\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.952764 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.958859 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-config-data\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.965864 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:35 crc kubenswrapper[4848]: I0128 13:10:35.970342 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjshx\" (UniqueName: \"kubernetes.io/projected/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-kube-api-access-kjshx\") pod \"nova-metadata-0\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") " pod="openstack/nova-metadata-0"
Jan 28 13:10:36 crc kubenswrapper[4848]: I0128 13:10:36.068206 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 13:10:36 crc kubenswrapper[4848]: I0128 13:10:36.585128 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:36 crc kubenswrapper[4848]: I0128 13:10:36.864546 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c1d23b5-4c91-4195-8290-4e7ae032bac9" path="/var/lib/kubelet/pods/0c1d23b5-4c91-4195-8290-4e7ae032bac9/volumes"
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.379377 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ecbed64-e1e6-4229-aa4e-ec1e8c845636","Type":"ContainerStarted","Data":"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"}
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.379446 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ecbed64-e1e6-4229-aa4e-ec1e8c845636","Type":"ContainerStarted","Data":"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"}
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.379457 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ecbed64-e1e6-4229-aa4e-ec1e8c845636","Type":"ContainerStarted","Data":"606d2f308e665ea249590320412bcd5c24f4c5b8add6c3aace7f2f09cf92366e"}
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.414269 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.414230537 podStartE2EDuration="2.414230537s" podCreationTimestamp="2026-01-28 13:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:37.402513954 +0000 UTC m=+1464.314731022" watchObservedRunningTime="2026-01-28 13:10:37.414230537 +0000 UTC m=+1464.326447575"
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.747695 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.747767 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.924239 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:10:37 crc kubenswrapper[4848]: I0128 13:10:37.924333 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.022978 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.023020 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.055459 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.393096 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.424063 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.792544 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.830513 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.206:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.830586 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.206:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.890793 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568df974c9-vxghc"]
Jan 28 13:10:38 crc kubenswrapper[4848]: I0128 13:10:38.893193 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-568df974c9-vxghc" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="dnsmasq-dns" containerID="cri-o://0f31e1333be2ad20c0ef1291d9bf9da5a5196adaaef2223b664eca73dd619598" gracePeriod=10
Jan 28 13:10:39 crc kubenswrapper[4848]: I0128 13:10:39.409089 4848 generic.go:334] "Generic (PLEG): container finished" podID="9e16da65-eb57-4041-90f2-00243246dabc" containerID="0f31e1333be2ad20c0ef1291d9bf9da5a5196adaaef2223b664eca73dd619598" exitCode=0
Jan 28 13:10:39 crc kubenswrapper[4848]: I0128 13:10:39.409421 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568df974c9-vxghc" event={"ID":"9e16da65-eb57-4041-90f2-00243246dabc","Type":"ContainerDied","Data":"0f31e1333be2ad20c0ef1291d9bf9da5a5196adaaef2223b664eca73dd619598"}
Jan 28 13:10:39 crc kubenswrapper[4848]: I0128 13:10:39.580715 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-568df974c9-vxghc" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.178:5353: connect: connection refused"
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.276766 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568df974c9-vxghc"
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.364243 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-sb\") pod \"9e16da65-eb57-4041-90f2-00243246dabc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") "
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.364349 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-swift-storage-0\") pod \"9e16da65-eb57-4041-90f2-00243246dabc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") "
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.364388 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-config\") pod \"9e16da65-eb57-4041-90f2-00243246dabc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") "
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.364480 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-nb\") pod \"9e16da65-eb57-4041-90f2-00243246dabc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") "
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.364520 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b8f2\" (UniqueName: \"kubernetes.io/projected/9e16da65-eb57-4041-90f2-00243246dabc-kube-api-access-8b8f2\") pod \"9e16da65-eb57-4041-90f2-00243246dabc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") "
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.364616 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-svc\") pod \"9e16da65-eb57-4041-90f2-00243246dabc\" (UID: \"9e16da65-eb57-4041-90f2-00243246dabc\") "
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.390327 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e16da65-eb57-4041-90f2-00243246dabc-kube-api-access-8b8f2" (OuterVolumeSpecName: "kube-api-access-8b8f2") pod "9e16da65-eb57-4041-90f2-00243246dabc" (UID: "9e16da65-eb57-4041-90f2-00243246dabc"). InnerVolumeSpecName "kube-api-access-8b8f2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.460091 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568df974c9-vxghc" event={"ID":"9e16da65-eb57-4041-90f2-00243246dabc","Type":"ContainerDied","Data":"677a72c01d91c876b5357f78fedb7c1ab2373790b642002d88b64dbfb5307337"}
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.460192 4848 scope.go:117] "RemoveContainer" containerID="0f31e1333be2ad20c0ef1291d9bf9da5a5196adaaef2223b664eca73dd619598"
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.460802 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568df974c9-vxghc"
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.492292 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b8f2\" (UniqueName: \"kubernetes.io/projected/9e16da65-eb57-4041-90f2-00243246dabc-kube-api-access-8b8f2\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.560226 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9e16da65-eb57-4041-90f2-00243246dabc" (UID: "9e16da65-eb57-4041-90f2-00243246dabc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.560610 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9e16da65-eb57-4041-90f2-00243246dabc" (UID: "9e16da65-eb57-4041-90f2-00243246dabc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.561698 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9e16da65-eb57-4041-90f2-00243246dabc" (UID: "9e16da65-eb57-4041-90f2-00243246dabc"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.571350 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-config" (OuterVolumeSpecName: "config") pod "9e16da65-eb57-4041-90f2-00243246dabc" (UID: "9e16da65-eb57-4041-90f2-00243246dabc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.595113 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.595170 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.595188 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-config\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.595200 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.605950 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9e16da65-eb57-4041-90f2-00243246dabc" (UID: "9e16da65-eb57-4041-90f2-00243246dabc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.625525 4848 scope.go:117] "RemoveContainer" containerID="af598574efbb1972543573cfc16add6033cdd0aabae6d503b844c928a50bf80b"
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.697952 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e16da65-eb57-4041-90f2-00243246dabc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.808664 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568df974c9-vxghc"]
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.822196 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-568df974c9-vxghc"]
Jan 28 13:10:40 crc kubenswrapper[4848]: I0128 13:10:40.870167 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e16da65-eb57-4041-90f2-00243246dabc" path="/var/lib/kubelet/pods/9e16da65-eb57-4041-90f2-00243246dabc/volumes"
Jan 28 13:10:41 crc kubenswrapper[4848]: I0128 13:10:41.069296 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 28 13:10:41 crc kubenswrapper[4848]: I0128 13:10:41.069376 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 28 13:10:41 crc kubenswrapper[4848]: I0128 13:10:41.478905 4848 generic.go:334] "Generic (PLEG): container finished" podID="77d21746-7f11-4c88-9433-8672991fe2e3" containerID="c23039557b3d2d5dddf6c6ed44be31cfe71c0d206b50bbab5afa2fda6fee2408" exitCode=0
Jan 28 13:10:41 crc kubenswrapper[4848]: I0128 13:10:41.479008 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xqjf4" event={"ID":"77d21746-7f11-4c88-9433-8672991fe2e3","Type":"ContainerDied","Data":"c23039557b3d2d5dddf6c6ed44be31cfe71c0d206b50bbab5afa2fda6fee2408"}
Jan 28 13:10:42 crc kubenswrapper[4848]: I0128 13:10:42.991822 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xqjf4"
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.093909 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-config-data\") pod \"77d21746-7f11-4c88-9433-8672991fe2e3\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") "
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.094556 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-combined-ca-bundle\") pod \"77d21746-7f11-4c88-9433-8672991fe2e3\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") "
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.094657 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2hpk\" (UniqueName: \"kubernetes.io/projected/77d21746-7f11-4c88-9433-8672991fe2e3-kube-api-access-m2hpk\") pod \"77d21746-7f11-4c88-9433-8672991fe2e3\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") "
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.094734 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-scripts\") pod \"77d21746-7f11-4c88-9433-8672991fe2e3\" (UID: \"77d21746-7f11-4c88-9433-8672991fe2e3\") "
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.114919 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-scripts" (OuterVolumeSpecName: "scripts") pod "77d21746-7f11-4c88-9433-8672991fe2e3" (UID: "77d21746-7f11-4c88-9433-8672991fe2e3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.114998 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d21746-7f11-4c88-9433-8672991fe2e3-kube-api-access-m2hpk" (OuterVolumeSpecName: "kube-api-access-m2hpk") pod "77d21746-7f11-4c88-9433-8672991fe2e3" (UID: "77d21746-7f11-4c88-9433-8672991fe2e3"). InnerVolumeSpecName "kube-api-access-m2hpk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.129240 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77d21746-7f11-4c88-9433-8672991fe2e3" (UID: "77d21746-7f11-4c88-9433-8672991fe2e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.138039 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-config-data" (OuterVolumeSpecName: "config-data") pod "77d21746-7f11-4c88-9433-8672991fe2e3" (UID: "77d21746-7f11-4c88-9433-8672991fe2e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.197898 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.198096 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.198188 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2hpk\" (UniqueName: \"kubernetes.io/projected/77d21746-7f11-4c88-9433-8672991fe2e3-kube-api-access-m2hpk\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.198324 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77d21746-7f11-4c88-9433-8672991fe2e3-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.511922 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xqjf4" event={"ID":"77d21746-7f11-4c88-9433-8672991fe2e3","Type":"ContainerDied","Data":"7c8b882be1f86673e1291f63c41db757bea18062eed0469909782c842a995bc1"}
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.511971 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c8b882be1f86673e1291f63c41db757bea18062eed0469909782c842a995bc1"
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.512062 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xqjf4"
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.720930 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.721608 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-log" containerID="cri-o://bf04a1e385133343755486277220915ed4cb1e5020169ba85219ebeaf4a7ad8e" gracePeriod=30
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.721709 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-api" containerID="cri-o://2ddcc04163e8f8d4871d4685d341406c55f1135d592bc18a4b47a697d6b4df07" gracePeriod=30
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.750784 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.751124 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="356f4d06-990d-4f97-be70-555f01573201" containerName="nova-scheduler-scheduler" containerID="cri-o://317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6" gracePeriod=30
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.806689 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.807057 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-log" containerID="cri-o://db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e" gracePeriod=30
Jan 28 13:10:43 crc kubenswrapper[4848]: I0128 13:10:43.807217 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-metadata" containerID="cri-o://b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9" gracePeriod=30
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.488848 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.525625 4848 generic.go:334] "Generic (PLEG): container finished" podID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerID="bf04a1e385133343755486277220915ed4cb1e5020169ba85219ebeaf4a7ad8e" exitCode=143
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.525716 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a879d23c-f2e2-47d8-929a-795e3bb19442","Type":"ContainerDied","Data":"bf04a1e385133343755486277220915ed4cb1e5020169ba85219ebeaf4a7ad8e"}
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528636 4848 generic.go:334] "Generic (PLEG): container finished" podID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerID="b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9" exitCode=0
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528697 4848 generic.go:334] "Generic (PLEG): container finished" podID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerID="db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e" exitCode=143
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528750 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ecbed64-e1e6-4229-aa4e-ec1e8c845636","Type":"ContainerDied","Data":"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"}
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528813 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ecbed64-e1e6-4229-aa4e-ec1e8c845636","Type":"ContainerDied","Data":"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"}
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528765 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528847 4848 scope.go:117] "RemoveContainer" containerID="b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.528827 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ecbed64-e1e6-4229-aa4e-ec1e8c845636","Type":"ContainerDied","Data":"606d2f308e665ea249590320412bcd5c24f4c5b8add6c3aace7f2f09cf92366e"}
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.542595 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjshx\" (UniqueName: \"kubernetes.io/projected/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-kube-api-access-kjshx\") pod \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") "
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.543816 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-nova-metadata-tls-certs\") pod \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") "
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.543915 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-combined-ca-bundle\") pod \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") "
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.544003 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-config-data\") pod \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") "
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.544069 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-logs\") pod \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\" (UID: \"6ecbed64-e1e6-4229-aa4e-ec1e8c845636\") "
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.547399 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-logs" (OuterVolumeSpecName: "logs") pod "6ecbed64-e1e6-4229-aa4e-ec1e8c845636" (UID: "6ecbed64-e1e6-4229-aa4e-ec1e8c845636"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.559578 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-kube-api-access-kjshx" (OuterVolumeSpecName: "kube-api-access-kjshx") pod "6ecbed64-e1e6-4229-aa4e-ec1e8c845636" (UID: "6ecbed64-e1e6-4229-aa4e-ec1e8c845636"). InnerVolumeSpecName "kube-api-access-kjshx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.568702 4848 scope.go:117] "RemoveContainer" containerID="db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.581369 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ecbed64-e1e6-4229-aa4e-ec1e8c845636" (UID: "6ecbed64-e1e6-4229-aa4e-ec1e8c845636"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.598515 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-config-data" (OuterVolumeSpecName: "config-data") pod "6ecbed64-e1e6-4229-aa4e-ec1e8c845636" (UID: "6ecbed64-e1e6-4229-aa4e-ec1e8c845636"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.638094 4848 scope.go:117] "RemoveContainer" containerID="b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"
Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.639111 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9\": container with ID starting with b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9 not found: ID does not exist" containerID="b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.639160 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"} err="failed to get container status \"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9\": rpc error: code = NotFound desc = could not find container \"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9\": container with ID starting with b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9 not found: ID does not exist"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.639230 4848 scope.go:117] "RemoveContainer" containerID="db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"
Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.640369 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e\": container with ID starting with db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e not found: ID does not exist" containerID="db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"
Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.640422 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"} err="failed to get container status \"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e\": rpc error: code = NotFound desc = could not find container \"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e\": container with ID starting with db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e
not found: ID does not exist" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.640451 4848 scope.go:117] "RemoveContainer" containerID="b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.642019 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9"} err="failed to get container status \"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9\": rpc error: code = NotFound desc = could not find container \"b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9\": container with ID starting with b3df7ed804a0106852482742f9ffd002e01876ce76999cb5d033fbc2c5093be9 not found: ID does not exist" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.642063 4848 scope.go:117] "RemoveContainer" containerID="db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.642494 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e"} err="failed to get container status \"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e\": rpc error: code = NotFound desc = could not find container \"db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e\": container with ID starting with db59ffb308be5e93ddb0e91e273b98f3119622eccdc88fd4b13416ff607f180e not found: ID does not exist" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.645898 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "6ecbed64-e1e6-4229-aa4e-ec1e8c845636" (UID: "6ecbed64-e1e6-4229-aa4e-ec1e8c845636"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.646411 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjshx\" (UniqueName: \"kubernetes.io/projected/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-kube-api-access-kjshx\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.646434 4848 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.646447 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.646456 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.646465 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ecbed64-e1e6-4229-aa4e-ec1e8c845636-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.884560 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.908107 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.926836 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.927381 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-metadata" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927399 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-metadata" Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.927426 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="init" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927434 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="init" Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.927452 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d21746-7f11-4c88-9433-8672991fe2e3" containerName="nova-manage" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927459 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d21746-7f11-4c88-9433-8672991fe2e3" containerName="nova-manage" Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.927592 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="dnsmasq-dns" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927603 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="dnsmasq-dns" Jan 28 13:10:44 crc kubenswrapper[4848]: E0128 13:10:44.927627 4848 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-log" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927635 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-log" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927867 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e16da65-eb57-4041-90f2-00243246dabc" containerName="dnsmasq-dns" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927896 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-metadata" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927911 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d21746-7f11-4c88-9433-8672991fe2e3" containerName="nova-manage" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.927929 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" containerName="nova-metadata-log" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.929108 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.935754 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.936003 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 28 13:10:44 crc kubenswrapper[4848]: I0128 13:10:44.939868 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.058578 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-config-data\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.058685 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10e136b8-b94a-47d4-ad21-0646f193e596-logs\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.058782 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.058825 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.059153 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzdqn\" (UniqueName: 
\"kubernetes.io/projected/10e136b8-b94a-47d4-ad21-0646f193e596-kube-api-access-vzdqn\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.161830 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-config-data\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.161921 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10e136b8-b94a-47d4-ad21-0646f193e596-logs\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.162008 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.162067 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.162234 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzdqn\" (UniqueName: \"kubernetes.io/projected/10e136b8-b94a-47d4-ad21-0646f193e596-kube-api-access-vzdqn\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.162633 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10e136b8-b94a-47d4-ad21-0646f193e596-logs\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.167843 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.169969 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-config-data\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.172682 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.183140 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-vzdqn\" (UniqueName: \"kubernetes.io/projected/10e136b8-b94a-47d4-ad21-0646f193e596-kube-api-access-vzdqn\") pod \"nova-metadata-0\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.308847 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.555512 4848 generic.go:334] "Generic (PLEG): container finished" podID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerID="2ddcc04163e8f8d4871d4685d341406c55f1135d592bc18a4b47a697d6b4df07" exitCode=0 Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.555867 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a879d23c-f2e2-47d8-929a-795e3bb19442","Type":"ContainerDied","Data":"2ddcc04163e8f8d4871d4685d341406c55f1135d592bc18a4b47a697d6b4df07"} Jan 28 13:10:45 crc kubenswrapper[4848]: I0128 13:10:45.906096 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.220164 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.304781 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-combined-ca-bundle\") pod \"a879d23c-f2e2-47d8-929a-795e3bb19442\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.304904 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-config-data\") pod \"a879d23c-f2e2-47d8-929a-795e3bb19442\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.304965 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vcgs\" (UniqueName: \"kubernetes.io/projected/a879d23c-f2e2-47d8-929a-795e3bb19442-kube-api-access-6vcgs\") pod \"a879d23c-f2e2-47d8-929a-795e3bb19442\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.305024 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a879d23c-f2e2-47d8-929a-795e3bb19442-logs\") pod \"a879d23c-f2e2-47d8-929a-795e3bb19442\" (UID: \"a879d23c-f2e2-47d8-929a-795e3bb19442\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.305883 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a879d23c-f2e2-47d8-929a-795e3bb19442-logs" (OuterVolumeSpecName: "logs") pod "a879d23c-f2e2-47d8-929a-795e3bb19442" (UID: "a879d23c-f2e2-47d8-929a-795e3bb19442"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.317706 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a879d23c-f2e2-47d8-929a-795e3bb19442-kube-api-access-6vcgs" (OuterVolumeSpecName: "kube-api-access-6vcgs") pod "a879d23c-f2e2-47d8-929a-795e3bb19442" (UID: "a879d23c-f2e2-47d8-929a-795e3bb19442"). InnerVolumeSpecName "kube-api-access-6vcgs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.342972 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a879d23c-f2e2-47d8-929a-795e3bb19442" (UID: "a879d23c-f2e2-47d8-929a-795e3bb19442"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.362352 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-config-data" (OuterVolumeSpecName: "config-data") pod "a879d23c-f2e2-47d8-929a-795e3bb19442" (UID: "a879d23c-f2e2-47d8-929a-795e3bb19442"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.408501 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.408535 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a879d23c-f2e2-47d8-929a-795e3bb19442-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.408574 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vcgs\" (UniqueName: \"kubernetes.io/projected/a879d23c-f2e2-47d8-929a-795e3bb19442-kube-api-access-6vcgs\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.408590 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a879d23c-f2e2-47d8-929a-795e3bb19442-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.449666 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.568866 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a879d23c-f2e2-47d8-929a-795e3bb19442","Type":"ContainerDied","Data":"1bc752b839da21a43bd6becba74185d28bf9abfda07f6af5ade816210af5d1a2"} Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.568881 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.568943 4848 scope.go:117] "RemoveContainer" containerID="2ddcc04163e8f8d4871d4685d341406c55f1135d592bc18a4b47a697d6b4df07" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.575845 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10e136b8-b94a-47d4-ad21-0646f193e596","Type":"ContainerStarted","Data":"82295e29aef7c2f2a5b64a93dde3154d43cb7845184884fd0e2fab317ceb219a"} Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.575886 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10e136b8-b94a-47d4-ad21-0646f193e596","Type":"ContainerStarted","Data":"07b290654e2289aa63a23d07a3307330e7aaf26616ee57256f1df9eef41380c8"} Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.575906 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10e136b8-b94a-47d4-ad21-0646f193e596","Type":"ContainerStarted","Data":"60fe11837a6a881304b265345203fa544e70c316b2300aaa5c7e55b46dc29f10"} Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.578273 4848 generic.go:334] "Generic (PLEG): container finished" podID="356f4d06-990d-4f97-be70-555f01573201" containerID="317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6" exitCode=0 Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.578334 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.578483 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"356f4d06-990d-4f97-be70-555f01573201","Type":"ContainerDied","Data":"317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6"} Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.578586 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"356f4d06-990d-4f97-be70-555f01573201","Type":"ContainerDied","Data":"25314d38050d5f86a0f78faccfda8f970b90e03e1822b3e3b6331c5824bd1ad4"} Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.607143 4848 scope.go:117] "RemoveContainer" containerID="bf04a1e385133343755486277220915ed4cb1e5020169ba85219ebeaf4a7ad8e" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.612683 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mq2vk\" (UniqueName: \"kubernetes.io/projected/356f4d06-990d-4f97-be70-555f01573201-kube-api-access-mq2vk\") pod \"356f4d06-990d-4f97-be70-555f01573201\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.612775 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-combined-ca-bundle\") pod \"356f4d06-990d-4f97-be70-555f01573201\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.612905 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-config-data\") pod \"356f4d06-990d-4f97-be70-555f01573201\" (UID: \"356f4d06-990d-4f97-be70-555f01573201\") " Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.618885 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/projected/356f4d06-990d-4f97-be70-555f01573201-kube-api-access-mq2vk" (OuterVolumeSpecName: "kube-api-access-mq2vk") pod "356f4d06-990d-4f97-be70-555f01573201" (UID: "356f4d06-990d-4f97-be70-555f01573201"). InnerVolumeSpecName "kube-api-access-mq2vk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.623753 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.623725914 podStartE2EDuration="2.623725914s" podCreationTimestamp="2026-01-28 13:10:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:46.607441406 +0000 UTC m=+1473.519658444" watchObservedRunningTime="2026-01-28 13:10:46.623725914 +0000 UTC m=+1473.535942942" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.632823 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.644344 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.656128 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "356f4d06-990d-4f97-be70-555f01573201" (UID: "356f4d06-990d-4f97-be70-555f01573201"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.657778 4848 scope.go:117] "RemoveContainer" containerID="317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.658165 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-config-data" (OuterVolumeSpecName: "config-data") pod "356f4d06-990d-4f97-be70-555f01573201" (UID: "356f4d06-990d-4f97-be70-555f01573201"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.687362 4848 scope.go:117] "RemoveContainer" containerID="317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.687797 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: E0128 13:10:46.688219 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6\": container with ID starting with 317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6 not found: ID does not exist" containerID="317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.688282 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6"} err="failed to get container status \"317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6\": rpc error: code = NotFound desc = could not find container \"317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6\": container with ID starting with 317f71cc233e2a18914bbb932dc06db9296e35e0cde63ebade3fa51a662d92a6 not found: ID does not exist" Jan 28 13:10:46 crc kubenswrapper[4848]: E0128 13:10:46.688555 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-api" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.688585 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-api" Jan 28 13:10:46 crc kubenswrapper[4848]: E0128 13:10:46.688643 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-log" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.688653 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-log" Jan 28 13:10:46 crc kubenswrapper[4848]: E0128 13:10:46.688675 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356f4d06-990d-4f97-be70-555f01573201" containerName="nova-scheduler-scheduler" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.688684 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="356f4d06-990d-4f97-be70-555f01573201" containerName="nova-scheduler-scheduler" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.688945 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-api" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.688989 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="356f4d06-990d-4f97-be70-555f01573201" containerName="nova-scheduler-scheduler" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.689009 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" containerName="nova-api-log" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.700648 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.708074 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.717212 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.717381 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356f4d06-990d-4f97-be70-555f01573201-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.717399 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mq2vk\" (UniqueName: \"kubernetes.io/projected/356f4d06-990d-4f97-be70-555f01573201-kube-api-access-mq2vk\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.750434 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.821860 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b68aad89-78f2-4015-8794-9614c759cc4a-logs\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.821921 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kjrr\" (UniqueName: \"kubernetes.io/projected/b68aad89-78f2-4015-8794-9614c759cc4a-kube-api-access-7kjrr\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.821980 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-config-data\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.822090 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.864991 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ecbed64-e1e6-4229-aa4e-ec1e8c845636" path="/var/lib/kubelet/pods/6ecbed64-e1e6-4229-aa4e-ec1e8c845636/volumes" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.865701 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a879d23c-f2e2-47d8-929a-795e3bb19442" path="/var/lib/kubelet/pods/a879d23c-f2e2-47d8-929a-795e3bb19442/volumes" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.904312 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.920974 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.923781 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.923963 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b68aad89-78f2-4015-8794-9614c759cc4a-logs\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.924039 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kjrr\" (UniqueName: \"kubernetes.io/projected/b68aad89-78f2-4015-8794-9614c759cc4a-kube-api-access-7kjrr\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.924140 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-config-data\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.952785 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.954657 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.959380 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.970794 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b68aad89-78f2-4015-8794-9614c759cc4a-logs\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.971979 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.973373 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.975125 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-config-data\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:46 crc kubenswrapper[4848]: I0128 13:10:46.982164 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kjrr\" (UniqueName: \"kubernetes.io/projected/b68aad89-78f2-4015-8794-9614c759cc4a-kube-api-access-7kjrr\") pod \"nova-api-0\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " pod="openstack/nova-api-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.025895 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-config-data\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.025946 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.026052 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txnl2\" (UniqueName: \"kubernetes.io/projected/9aea94d4-8c5e-4305-85c8-bdacebcf990e-kube-api-access-txnl2\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.054609 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.128150 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txnl2\" (UniqueName: \"kubernetes.io/projected/9aea94d4-8c5e-4305-85c8-bdacebcf990e-kube-api-access-txnl2\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.128968 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-config-data\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.129004 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.134471 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-config-data\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.140939 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.146560 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txnl2\" (UniqueName: \"kubernetes.io/projected/9aea94d4-8c5e-4305-85c8-bdacebcf990e-kube-api-access-txnl2\") pod \"nova-scheduler-0\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") " pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.278315 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.600887 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:10:47 crc kubenswrapper[4848]: W0128 13:10:47.610205 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb68aad89_78f2_4015_8794_9614c759cc4a.slice/crio-fd57e4ded9bb7bdc08952f0b5195a4930aec4b6b17b4595f742a9b5ca3bcd863 WatchSource:0}: Error finding container fd57e4ded9bb7bdc08952f0b5195a4930aec4b6b17b4595f742a9b5ca3bcd863: Status 404 returned error can't find the container with id fd57e4ded9bb7bdc08952f0b5195a4930aec4b6b17b4595f742a9b5ca3bcd863 Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.648568 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 28 13:10:47 crc kubenswrapper[4848]: I0128 13:10:47.830742 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:10:47 crc kubenswrapper[4848]: W0128 13:10:47.832112 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9aea94d4_8c5e_4305_85c8_bdacebcf990e.slice/crio-f3fb325256d72bcdf77faee22072bbd5fab3af347a7676436ca4c13dc649f8b1 WatchSource:0}: Error finding container f3fb325256d72bcdf77faee22072bbd5fab3af347a7676436ca4c13dc649f8b1: Status 404 returned error can't find the container with id f3fb325256d72bcdf77faee22072bbd5fab3af347a7676436ca4c13dc649f8b1 Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.625777 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b68aad89-78f2-4015-8794-9614c759cc4a","Type":"ContainerStarted","Data":"391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c"} Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.626175 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b68aad89-78f2-4015-8794-9614c759cc4a","Type":"ContainerStarted","Data":"728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9"} Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.626187 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b68aad89-78f2-4015-8794-9614c759cc4a","Type":"ContainerStarted","Data":"fd57e4ded9bb7bdc08952f0b5195a4930aec4b6b17b4595f742a9b5ca3bcd863"} Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.628177 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aea94d4-8c5e-4305-85c8-bdacebcf990e","Type":"ContainerStarted","Data":"0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a"} Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.628227 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aea94d4-8c5e-4305-85c8-bdacebcf990e","Type":"ContainerStarted","Data":"f3fb325256d72bcdf77faee22072bbd5fab3af347a7676436ca4c13dc649f8b1"} Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.667809 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.667780763 podStartE2EDuration="2.667780763s" podCreationTimestamp="2026-01-28 13:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:48.657188611 +0000 UTC 
m=+1475.569405659" watchObservedRunningTime="2026-01-28 13:10:48.667780763 +0000 UTC m=+1475.579997801" Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.697377 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.697358317 podStartE2EDuration="2.697358317s" podCreationTimestamp="2026-01-28 13:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:48.689064629 +0000 UTC m=+1475.601281667" watchObservedRunningTime="2026-01-28 13:10:48.697358317 +0000 UTC m=+1475.609575355" Jan 28 13:10:48 crc kubenswrapper[4848]: I0128 13:10:48.863937 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="356f4d06-990d-4f97-be70-555f01573201" path="/var/lib/kubelet/pods/356f4d06-990d-4f97-be70-555f01573201/volumes" Jan 28 13:10:50 crc kubenswrapper[4848]: I0128 13:10:50.309637 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 13:10:50 crc kubenswrapper[4848]: I0128 13:10:50.310117 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 13:10:52 crc kubenswrapper[4848]: I0128 13:10:52.280293 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 28 13:10:52 crc kubenswrapper[4848]: I0128 13:10:52.672041 4848 generic.go:334] "Generic (PLEG): container finished" podID="ba850830-ca3a-43c2-8639-bdf9386d7f9b" containerID="ed772e9f95c32c4ce555ea9cb08e7cad878f5377ffb4f74863f0cf0ea90c01d2" exitCode=0 Jan 28 13:10:52 crc kubenswrapper[4848]: I0128 13:10:52.672148 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bglkq" event={"ID":"ba850830-ca3a-43c2-8639-bdf9386d7f9b","Type":"ContainerDied","Data":"ed772e9f95c32c4ce555ea9cb08e7cad878f5377ffb4f74863f0cf0ea90c01d2"} Jan 28 13:10:52 crc kubenswrapper[4848]: I0128 13:10:52.974480 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:10:52 crc kubenswrapper[4848]: I0128 13:10:52.974877 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="4d631c7a-117c-4a10-a7f6-28331bc4ae84" containerName="kube-state-metrics" containerID="cri-o://b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc" gracePeriod=30 Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.674495 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.686565 4848 generic.go:334] "Generic (PLEG): container finished" podID="4d631c7a-117c-4a10-a7f6-28331bc4ae84" containerID="b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc" exitCode=2 Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.687182 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.687390 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4d631c7a-117c-4a10-a7f6-28331bc4ae84","Type":"ContainerDied","Data":"b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc"} Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.688085 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4d631c7a-117c-4a10-a7f6-28331bc4ae84","Type":"ContainerDied","Data":"b70e86e58bd3f4ffd5fc1ef25896c04958fad3c6add1d6b0fdddace71ff66be8"} Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.688114 4848 scope.go:117] "RemoveContainer" containerID="b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc" Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.752015 4848 scope.go:117] "RemoveContainer" containerID="b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc" Jan 28 13:10:53 crc kubenswrapper[4848]: E0128 13:10:53.755181 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc\": container with ID starting with b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc not found: ID does not exist" containerID="b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc" Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.755244 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc"} err="failed to get container status \"b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc\": rpc error: code = NotFound desc = could not find container \"b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc\": container with ID starting with b19273a557bbdd5189f9573dabd6b8505d31815ed77595838cf820d5ef7640fc not found: ID does not exist" Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.826463 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-td8q9\" (UniqueName: \"kubernetes.io/projected/4d631c7a-117c-4a10-a7f6-28331bc4ae84-kube-api-access-td8q9\") pod \"4d631c7a-117c-4a10-a7f6-28331bc4ae84\" (UID: \"4d631c7a-117c-4a10-a7f6-28331bc4ae84\") " Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.859911 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d631c7a-117c-4a10-a7f6-28331bc4ae84-kube-api-access-td8q9" (OuterVolumeSpecName: "kube-api-access-td8q9") pod "4d631c7a-117c-4a10-a7f6-28331bc4ae84" (UID: "4d631c7a-117c-4a10-a7f6-28331bc4ae84"). InnerVolumeSpecName "kube-api-access-td8q9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:53 crc kubenswrapper[4848]: I0128 13:10:53.932120 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-td8q9\" (UniqueName: \"kubernetes.io/projected/4d631c7a-117c-4a10-a7f6-28331bc4ae84-kube-api-access-td8q9\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.048614 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.061597 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.077041 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:10:54 crc kubenswrapper[4848]: E0128 13:10:54.077851 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d631c7a-117c-4a10-a7f6-28331bc4ae84" containerName="kube-state-metrics" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.077871 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d631c7a-117c-4a10-a7f6-28331bc4ae84" containerName="kube-state-metrics" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.078072 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d631c7a-117c-4a10-a7f6-28331bc4ae84" containerName="kube-state-metrics" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.078974 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.081923 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.083117 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.103839 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.142246 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.142389 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.142433 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.142470 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcw9b\" (UniqueName: 
\"kubernetes.io/projected/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-api-access-hcw9b\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.243107 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bglkq" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.244832 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.244893 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.244927 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcw9b\" (UniqueName: \"kubernetes.io/projected/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-api-access-hcw9b\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.245169 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.251297 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.251499 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.253428 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.270297 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcw9b\" (UniqueName: \"kubernetes.io/projected/791ef386-40ae-4395-aa5d-b86f13307c6c-kube-api-access-hcw9b\") pod \"kube-state-metrics-0\" (UID: \"791ef386-40ae-4395-aa5d-b86f13307c6c\") " pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 
13:10:54.346228 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-config-data\") pod \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.347362 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-combined-ca-bundle\") pod \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.347525 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-scripts\") pod \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.347834 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz8hw\" (UniqueName: \"kubernetes.io/projected/ba850830-ca3a-43c2-8639-bdf9386d7f9b-kube-api-access-qz8hw\") pod \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\" (UID: \"ba850830-ca3a-43c2-8639-bdf9386d7f9b\") " Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.353862 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba850830-ca3a-43c2-8639-bdf9386d7f9b-kube-api-access-qz8hw" (OuterVolumeSpecName: "kube-api-access-qz8hw") pod "ba850830-ca3a-43c2-8639-bdf9386d7f9b" (UID: "ba850830-ca3a-43c2-8639-bdf9386d7f9b"). InnerVolumeSpecName "kube-api-access-qz8hw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.354710 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-scripts" (OuterVolumeSpecName: "scripts") pod "ba850830-ca3a-43c2-8639-bdf9386d7f9b" (UID: "ba850830-ca3a-43c2-8639-bdf9386d7f9b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.383241 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba850830-ca3a-43c2-8639-bdf9386d7f9b" (UID: "ba850830-ca3a-43c2-8639-bdf9386d7f9b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.408395 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.421297 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-config-data" (OuterVolumeSpecName: "config-data") pod "ba850830-ca3a-43c2-8639-bdf9386d7f9b" (UID: "ba850830-ca3a-43c2-8639-bdf9386d7f9b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.450894 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.450952 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.450967 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz8hw\" (UniqueName: \"kubernetes.io/projected/ba850830-ca3a-43c2-8639-bdf9386d7f9b-kube-api-access-qz8hw\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.450979 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba850830-ca3a-43c2-8639-bdf9386d7f9b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.701914 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bglkq" event={"ID":"ba850830-ca3a-43c2-8639-bdf9386d7f9b","Type":"ContainerDied","Data":"1292a956f3002321b646a1b6b8c14b3a1bcfb19951a21e327119018adb346d67"} Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.701951 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bglkq" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.701968 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1292a956f3002321b646a1b6b8c14b3a1bcfb19951a21e327119018adb346d67" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.811853 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 13:10:54 crc kubenswrapper[4848]: E0128 13:10:54.812602 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba850830-ca3a-43c2-8639-bdf9386d7f9b" containerName="nova-cell1-conductor-db-sync" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.812626 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba850830-ca3a-43c2-8639-bdf9386d7f9b" containerName="nova-cell1-conductor-db-sync" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.812850 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba850830-ca3a-43c2-8639-bdf9386d7f9b" containerName="nova-cell1-conductor-db-sync" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.813744 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.825906 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.829786 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.868182 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d631c7a-117c-4a10-a7f6-28331bc4ae84" path="/var/lib/kubelet/pods/4d631c7a-117c-4a10-a7f6-28331bc4ae84/volumes" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.970215 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df8d8482-966f-4f20-836e-09bef423d150-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.970359 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9kpp\" (UniqueName: \"kubernetes.io/projected/df8d8482-966f-4f20-836e-09bef423d150-kube-api-access-h9kpp\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.970444 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df8d8482-966f-4f20-836e-09bef423d150-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:54 crc kubenswrapper[4848]: I0128 13:10:54.994815 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 13:10:55 crc kubenswrapper[4848]: W0128 13:10:55.022207 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod791ef386_40ae_4395_aa5d_b86f13307c6c.slice/crio-9a435993ea3ae6bddab8bc4c34372646e975aeda1e3f38e9d7b40674f3ec1f87 WatchSource:0}: Error finding container 9a435993ea3ae6bddab8bc4c34372646e975aeda1e3f38e9d7b40674f3ec1f87: Status 404 returned error can't find the container with id 9a435993ea3ae6bddab8bc4c34372646e975aeda1e3f38e9d7b40674f3ec1f87 Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.072370 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df8d8482-966f-4f20-836e-09bef423d150-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.072507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df8d8482-966f-4f20-836e-09bef423d150-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.072574 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9kpp\" (UniqueName: 
\"kubernetes.io/projected/df8d8482-966f-4f20-836e-09bef423d150-kube-api-access-h9kpp\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.079220 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df8d8482-966f-4f20-836e-09bef423d150-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.080454 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df8d8482-966f-4f20-836e-09bef423d150-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.092933 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9kpp\" (UniqueName: \"kubernetes.io/projected/df8d8482-966f-4f20-836e-09bef423d150-kube-api-access-h9kpp\") pod \"nova-cell1-conductor-0\" (UID: \"df8d8482-966f-4f20-836e-09bef423d150\") " pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.141319 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.309708 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.310190 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.704200 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 13:10:55 crc kubenswrapper[4848]: I0128 13:10:55.718863 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"791ef386-40ae-4395-aa5d-b86f13307c6c","Type":"ContainerStarted","Data":"9a435993ea3ae6bddab8bc4c34372646e975aeda1e3f38e9d7b40674f3ec1f87"} Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.322393 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.213:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.322654 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.213:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.366884 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.367314 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-central-agent" containerID="cri-o://204f3b263bc8aa130403f00a8758ca4664166abddd147dfc8151611f4e69a4dd" 
gracePeriod=30 Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.367504 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="proxy-httpd" containerID="cri-o://b258cfdb1e9f77810684d68ef1b20418e2f7fc94b2baa31b045707b282dee60f" gracePeriod=30 Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.367567 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="sg-core" containerID="cri-o://d70d3131bb5423997c9a4af376dcea61493d0c82157d560a0144fe5638cd2220" gracePeriod=30 Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.367614 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-notification-agent" containerID="cri-o://87c6339dbf0b935c2b4718a1163faa86e98cf90bced8282e8c1f459b3f216529" gracePeriod=30 Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.776109 4848 generic.go:334] "Generic (PLEG): container finished" podID="9e026660-5464-42b9-876f-0d28b39f28fe" containerID="b258cfdb1e9f77810684d68ef1b20418e2f7fc94b2baa31b045707b282dee60f" exitCode=0 Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.776141 4848 generic.go:334] "Generic (PLEG): container finished" podID="9e026660-5464-42b9-876f-0d28b39f28fe" containerID="d70d3131bb5423997c9a4af376dcea61493d0c82157d560a0144fe5638cd2220" exitCode=2 Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.776215 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerDied","Data":"b258cfdb1e9f77810684d68ef1b20418e2f7fc94b2baa31b045707b282dee60f"} Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.776301 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerDied","Data":"d70d3131bb5423997c9a4af376dcea61493d0c82157d560a0144fe5638cd2220"} Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.780907 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"791ef386-40ae-4395-aa5d-b86f13307c6c","Type":"ContainerStarted","Data":"c8f4987a4c1f6343127a766424fbe854c874e4b715239afd04774fea36d53da7"} Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.781081 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.784283 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"df8d8482-966f-4f20-836e-09bef423d150","Type":"ContainerStarted","Data":"523706909b48b93ba77c4d858cc42ee0c2274488c62ed8484507ceb46055faea"} Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.784341 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"df8d8482-966f-4f20-836e-09bef423d150","Type":"ContainerStarted","Data":"b3b931fa7bcf4074b17417366b62a8287915575f9655083dfc60175b05b27f68"} Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.784394 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.822550 4848 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.41400062 podStartE2EDuration="2.822521956s" podCreationTimestamp="2026-01-28 13:10:54 +0000 UTC" firstStartedPulling="2026-01-28 13:10:55.030126295 +0000 UTC m=+1481.942343323" lastFinishedPulling="2026-01-28 13:10:55.438647621 +0000 UTC m=+1482.350864659" observedRunningTime="2026-01-28 13:10:56.807472101 +0000 UTC m=+1483.719689139" watchObservedRunningTime="2026-01-28 13:10:56.822521956 +0000 UTC m=+1483.734738994" Jan 28 13:10:56 crc kubenswrapper[4848]: I0128 13:10:56.843246 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.843222016 podStartE2EDuration="2.843222016s" podCreationTimestamp="2026-01-28 13:10:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:10:56.834887616 +0000 UTC m=+1483.747104664" watchObservedRunningTime="2026-01-28 13:10:56.843222016 +0000 UTC m=+1483.755439054" Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.055419 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.055477 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.280732 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.318466 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.800388 4848 generic.go:334] "Generic (PLEG): container finished" podID="9e026660-5464-42b9-876f-0d28b39f28fe" containerID="204f3b263bc8aa130403f00a8758ca4664166abddd147dfc8151611f4e69a4dd" exitCode=0 Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.800561 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerDied","Data":"204f3b263bc8aa130403f00a8758ca4664166abddd147dfc8151611f4e69a4dd"} Jan 28 13:10:57 crc kubenswrapper[4848]: I0128 13:10:57.849898 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 28 13:10:58 crc kubenswrapper[4848]: I0128 13:10:58.096584 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.214:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:10:58 crc kubenswrapper[4848]: I0128 13:10:58.137488 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.214:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:11:00 crc kubenswrapper[4848]: I0128 13:11:00.175465 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 28 13:11:02 crc kubenswrapper[4848]: I0128 13:11:02.857968 4848 generic.go:334] "Generic (PLEG): container finished" podID="9e026660-5464-42b9-876f-0d28b39f28fe" 
containerID="87c6339dbf0b935c2b4718a1163faa86e98cf90bced8282e8c1f459b3f216529" exitCode=0 Jan 28 13:11:02 crc kubenswrapper[4848]: I0128 13:11:02.864318 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerDied","Data":"87c6339dbf0b935c2b4718a1163faa86e98cf90bced8282e8c1f459b3f216529"} Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.245961 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.332284 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-sg-core-conf-yaml\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.332380 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-log-httpd\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.332473 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-scripts\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.332521 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-combined-ca-bundle\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.333207 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.333802 4848 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.341072 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-scripts" (OuterVolumeSpecName: "scripts") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.377083 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.432221 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.434688 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-config-data\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.434757 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-827ts\" (UniqueName: \"kubernetes.io/projected/9e026660-5464-42b9-876f-0d28b39f28fe-kube-api-access-827ts\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.434823 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-run-httpd\") pod \"9e026660-5464-42b9-876f-0d28b39f28fe\" (UID: \"9e026660-5464-42b9-876f-0d28b39f28fe\") " Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.435165 4848 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.436148 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.435194 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.436274 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.439329 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e026660-5464-42b9-876f-0d28b39f28fe-kube-api-access-827ts" (OuterVolumeSpecName: "kube-api-access-827ts") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "kube-api-access-827ts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.537536 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-827ts\" (UniqueName: \"kubernetes.io/projected/9e026660-5464-42b9-876f-0d28b39f28fe-kube-api-access-827ts\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.537590 4848 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e026660-5464-42b9-876f-0d28b39f28fe-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.547712 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-config-data" (OuterVolumeSpecName: "config-data") pod "9e026660-5464-42b9-876f-0d28b39f28fe" (UID: "9e026660-5464-42b9-876f-0d28b39f28fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.639286 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e026660-5464-42b9-876f-0d28b39f28fe-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.879875 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e026660-5464-42b9-876f-0d28b39f28fe","Type":"ContainerDied","Data":"956755e45932ced0387c19e5089628ed7c83f0fb10bc8556715ced758cef9567"} Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.880172 4848 scope.go:117] "RemoveContainer" containerID="b258cfdb1e9f77810684d68ef1b20418e2f7fc94b2baa31b045707b282dee60f" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.881282 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.923721 4848 scope.go:117] "RemoveContainer" containerID="d70d3131bb5423997c9a4af376dcea61493d0c82157d560a0144fe5638cd2220" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.956030 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.970089 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.971543 4848 scope.go:117] "RemoveContainer" containerID="87c6339dbf0b935c2b4718a1163faa86e98cf90bced8282e8c1f459b3f216529" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.982166 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:03 crc kubenswrapper[4848]: E0128 13:11:03.982689 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="sg-core" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.982710 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="sg-core" Jan 28 13:11:03 crc kubenswrapper[4848]: E0128 13:11:03.982730 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-central-agent" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.982737 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-central-agent" Jan 28 13:11:03 crc kubenswrapper[4848]: E0128 13:11:03.982752 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="proxy-httpd" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.982759 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="proxy-httpd" Jan 28 13:11:03 crc kubenswrapper[4848]: E0128 13:11:03.982784 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-notification-agent" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.982791 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-notification-agent" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.983004 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="sg-core" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.983027 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="proxy-httpd" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.983039 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-central-agent" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.983048 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" containerName="ceilometer-notification-agent" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.985056 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.989050 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.995769 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 13:11:03 crc kubenswrapper[4848]: I0128 13:11:03.996563 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.011248 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.013246 4848 scope.go:117] "RemoveContainer" containerID="204f3b263bc8aa130403f00a8758ca4664166abddd147dfc8151611f4e69a4dd" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.051713 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-run-httpd\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.051774 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.051814 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.052104 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-scripts\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.052577 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.052652 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-config-data\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.052870 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr7j2\" (UniqueName: \"kubernetes.io/projected/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-kube-api-access-fr7j2\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: 
I0128 13:11:04.053095 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-log-httpd\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154144 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-scripts\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154235 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154282 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-config-data\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154324 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr7j2\" (UniqueName: \"kubernetes.io/projected/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-kube-api-access-fr7j2\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154363 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-log-httpd\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154398 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-run-httpd\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154418 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.154450 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.155577 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-log-httpd\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 
13:11:04.155670 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-run-httpd\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.160784 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-config-data\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.160929 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.161105 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-scripts\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.162667 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.174595 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.180632 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr7j2\" (UniqueName: \"kubernetes.io/projected/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-kube-api-access-fr7j2\") pod \"ceilometer-0\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.313425 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.422188 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.758577 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.770288 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-combined-ca-bundle\") pod \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.770629 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-config-data\") pod \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.770810 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fd8j\" (UniqueName: \"kubernetes.io/projected/7c2944fc-8fcf-499b-8e98-8e026c1065d2-kube-api-access-2fd8j\") pod \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\" (UID: \"7c2944fc-8fcf-499b-8e98-8e026c1065d2\") " Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.784909 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c2944fc-8fcf-499b-8e98-8e026c1065d2-kube-api-access-2fd8j" (OuterVolumeSpecName: "kube-api-access-2fd8j") pod "7c2944fc-8fcf-499b-8e98-8e026c1065d2" (UID: "7c2944fc-8fcf-499b-8e98-8e026c1065d2"). InnerVolumeSpecName "kube-api-access-2fd8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.812460 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c2944fc-8fcf-499b-8e98-8e026c1065d2" (UID: "7c2944fc-8fcf-499b-8e98-8e026c1065d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.829208 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-config-data" (OuterVolumeSpecName: "config-data") pod "7c2944fc-8fcf-499b-8e98-8e026c1065d2" (UID: "7c2944fc-8fcf-499b-8e98-8e026c1065d2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.873450 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e026660-5464-42b9-876f-0d28b39f28fe" path="/var/lib/kubelet/pods/9e026660-5464-42b9-876f-0d28b39f28fe/volumes" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.873905 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.874009 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fd8j\" (UniqueName: \"kubernetes.io/projected/7c2944fc-8fcf-499b-8e98-8e026c1065d2-kube-api-access-2fd8j\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.874105 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2944fc-8fcf-499b-8e98-8e026c1065d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.894678 4848 generic.go:334] "Generic (PLEG): container finished" podID="7c2944fc-8fcf-499b-8e98-8e026c1065d2" containerID="fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75" exitCode=137 Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.894741 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7c2944fc-8fcf-499b-8e98-8e026c1065d2","Type":"ContainerDied","Data":"fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75"} Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.894771 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7c2944fc-8fcf-499b-8e98-8e026c1065d2","Type":"ContainerDied","Data":"72abe7f680e351a52d891913e5e495b6af61108a4857bda28c4ef91922c64a50"} Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.894791 4848 scope.go:117] "RemoveContainer" containerID="fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.894802 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.939410 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.944112 4848 scope.go:117] "RemoveContainer" containerID="fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75" Jan 28 13:11:04 crc kubenswrapper[4848]: E0128 13:11:04.944845 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75\": container with ID starting with fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75 not found: ID does not exist" containerID="fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.944886 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75"} err="failed to get container status \"fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75\": rpc error: code = NotFound desc = could not find container \"fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75\": container with ID starting with fb227fb9cb3066111313d35e6a243ed6937540f0b812b9fecb65672b89888c75 not found: ID does not exist" Jan 28 13:11:04 crc kubenswrapper[4848]: I0128 13:11:04.947910 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.008273 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.024658 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:11:05 crc kubenswrapper[4848]: E0128 13:11:05.025223 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2944fc-8fcf-499b-8e98-8e026c1065d2" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.025260 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2944fc-8fcf-499b-8e98-8e026c1065d2" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.025512 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2944fc-8fcf-499b-8e98-8e026c1065d2" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.026527 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.030729 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.030923 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.031512 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.043170 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.184281 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.184391 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndhqv\" (UniqueName: \"kubernetes.io/projected/a524b9a4-fe08-4675-b873-030d31d75a28-kube-api-access-ndhqv\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.184456 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.184507 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.184567 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.287201 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.287362 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.287434 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.287572 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.287637 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndhqv\" (UniqueName: \"kubernetes.io/projected/a524b9a4-fe08-4675-b873-030d31d75a28-kube-api-access-ndhqv\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.294440 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.294881 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.296817 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.296957 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a524b9a4-fe08-4675-b873-030d31d75a28-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.304992 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndhqv\" (UniqueName: \"kubernetes.io/projected/a524b9a4-fe08-4675-b873-030d31d75a28-kube-api-access-ndhqv\") pod \"nova-cell1-novncproxy-0\" (UID: \"a524b9a4-fe08-4675-b873-030d31d75a28\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.317883 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.318594 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.335551 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-metadata-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.378420 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.886949 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 13:11:05 crc kubenswrapper[4848]: W0128 13:11:05.889068 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda524b9a4_fe08_4675_b873_030d31d75a28.slice/crio-dc3e3908df5214d33fd174374376ff0c7175624160e195eee8f379ed47920005 WatchSource:0}: Error finding container dc3e3908df5214d33fd174374376ff0c7175624160e195eee8f379ed47920005: Status 404 returned error can't find the container with id dc3e3908df5214d33fd174374376ff0c7175624160e195eee8f379ed47920005 Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.907168 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a524b9a4-fe08-4675-b873-030d31d75a28","Type":"ContainerStarted","Data":"dc3e3908df5214d33fd174374376ff0c7175624160e195eee8f379ed47920005"} Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.912123 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerStarted","Data":"f167a4225589d0c97e0ae61fa7fe7a7e54e1d9fefc0bf3caf406388d09da773b"} Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.912183 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerStarted","Data":"45378aef4707207b7cd3d01796a4bc93530779373bfc89ce26928606d883fbfd"} Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.912194 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerStarted","Data":"c40fc714b09276d7ebe9a1ba6185b4df668e72c8edf2346efd918e08d1b28e73"} Jan 28 13:11:05 crc kubenswrapper[4848]: I0128 13:11:05.919896 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 13:11:06 crc kubenswrapper[4848]: I0128 13:11:06.864605 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c2944fc-8fcf-499b-8e98-8e026c1065d2" path="/var/lib/kubelet/pods/7c2944fc-8fcf-499b-8e98-8e026c1065d2/volumes" Jan 28 13:11:06 crc kubenswrapper[4848]: I0128 13:11:06.924913 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a524b9a4-fe08-4675-b873-030d31d75a28","Type":"ContainerStarted","Data":"f3e9d3de234cfb91f170220de695021cbd2f048e86de17fafaf47975cbf2c777"} Jan 28 13:11:06 crc kubenswrapper[4848]: I0128 13:11:06.927479 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerStarted","Data":"84d80d0863948447ae836620be1eed5ba0f1565ef90a24ee8d67d7ca64188925"} Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.063695 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.063784 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.065366 
4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.065392 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.072311 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.073198 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.096006 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.095985603 podStartE2EDuration="3.095985603s" podCreationTimestamp="2026-01-28 13:11:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:06.947974538 +0000 UTC m=+1493.860191576" watchObservedRunningTime="2026-01-28 13:11:07.095985603 +0000 UTC m=+1494.008202641" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.346200 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb56bffb5-dphbx"] Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.348350 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.371046 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb56bffb5-dphbx"] Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.455735 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-svc\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.455858 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-swift-storage-0\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.456018 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgwmk\" (UniqueName: \"kubernetes.io/projected/5e75b70e-a61e-411a-b2ec-40478a36ba48-kube-api-access-cgwmk\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.456094 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-nb\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.456152 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-sb\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.456264 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-config\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.559193 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgwmk\" (UniqueName: \"kubernetes.io/projected/5e75b70e-a61e-411a-b2ec-40478a36ba48-kube-api-access-cgwmk\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.559284 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-nb\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.559312 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-sb\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.559344 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-config\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.559418 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-svc\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.559447 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-swift-storage-0\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.560600 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-swift-storage-0\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.561486 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-nb\") 
pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.562050 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-sb\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.562619 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-config\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.563142 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-svc\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.586699 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgwmk\" (UniqueName: \"kubernetes.io/projected/5e75b70e-a61e-411a-b2ec-40478a36ba48-kube-api-access-cgwmk\") pod \"dnsmasq-dns-bb56bffb5-dphbx\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.725577 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.924326 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.924381 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.924434 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.925846 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f2e69a4a3785c5d66035fd792a22c202c11766e978faf8a57dcccebf228af87c"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:11:07 crc kubenswrapper[4848]: I0128 13:11:07.925979 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" 
containerID="cri-o://f2e69a4a3785c5d66035fd792a22c202c11766e978faf8a57dcccebf228af87c" gracePeriod=600 Jan 28 13:11:08 crc kubenswrapper[4848]: W0128 13:11:08.540270 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e75b70e_a61e_411a_b2ec_40478a36ba48.slice/crio-2b9756d5d5fdeef1f19138b6cdb80565ea1b7488f5cd0d23cd0addbb41133524 WatchSource:0}: Error finding container 2b9756d5d5fdeef1f19138b6cdb80565ea1b7488f5cd0d23cd0addbb41133524: Status 404 returned error can't find the container with id 2b9756d5d5fdeef1f19138b6cdb80565ea1b7488f5cd0d23cd0addbb41133524 Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.560848 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb56bffb5-dphbx"] Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.984869 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerStarted","Data":"0630dfd2b8f0bdcd1b669dc518a719ccffca5993f11adf2d3c7cd8bb634633c0"} Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.984974 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.988156 4848 generic.go:334] "Generic (PLEG): container finished" podID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerID="03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902" exitCode=0 Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.988242 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" event={"ID":"5e75b70e-a61e-411a-b2ec-40478a36ba48","Type":"ContainerDied","Data":"03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902"} Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.988302 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" event={"ID":"5e75b70e-a61e-411a-b2ec-40478a36ba48","Type":"ContainerStarted","Data":"2b9756d5d5fdeef1f19138b6cdb80565ea1b7488f5cd0d23cd0addbb41133524"} Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.997334 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="f2e69a4a3785c5d66035fd792a22c202c11766e978faf8a57dcccebf228af87c" exitCode=0 Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.997418 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"f2e69a4a3785c5d66035fd792a22c202c11766e978faf8a57dcccebf228af87c"} Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.997482 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9"} Jan 28 13:11:08 crc kubenswrapper[4848]: I0128 13:11:08.997513 4848 scope.go:117] "RemoveContainer" containerID="549672e6f36f329f8d879da83cfb4972802790c5bf74c410e2275cf97e32bb6c" Jan 28 13:11:09 crc kubenswrapper[4848]: I0128 13:11:09.028644 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.074963007 podStartE2EDuration="6.028621405s" podCreationTimestamp="2026-01-28 13:11:03 +0000 UTC" 
firstStartedPulling="2026-01-28 13:11:04.968121618 +0000 UTC m=+1491.880338646" lastFinishedPulling="2026-01-28 13:11:07.921780006 +0000 UTC m=+1494.833997044" observedRunningTime="2026-01-28 13:11:09.009530719 +0000 UTC m=+1495.921747757" watchObservedRunningTime="2026-01-28 13:11:09.028621405 +0000 UTC m=+1495.940838443" Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.012063 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" event={"ID":"5e75b70e-a61e-411a-b2ec-40478a36ba48","Type":"ContainerStarted","Data":"8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745"} Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.012863 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.034763 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" podStartSLOduration=3.03472779 podStartE2EDuration="3.03472779s" podCreationTimestamp="2026-01-28 13:11:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:10.031353858 +0000 UTC m=+1496.943570906" watchObservedRunningTime="2026-01-28 13:11:10.03472779 +0000 UTC m=+1496.946944828" Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.344559 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.344863 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-log" containerID="cri-o://728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9" gracePeriod=30 Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.345678 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-api" containerID="cri-o://391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c" gracePeriod=30 Jan 28 13:11:10 crc kubenswrapper[4848]: I0128 13:11:10.379737 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:10 crc kubenswrapper[4848]: E0128 13:11:10.666369 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb68aad89_78f2_4015_8794_9614c759cc4a.slice/crio-conmon-728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 13:11:11.041522 4848 generic.go:334] "Generic (PLEG): container finished" podID="b68aad89-78f2-4015-8794-9614c759cc4a" containerID="728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9" exitCode=143 Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 13:11:11.042676 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b68aad89-78f2-4015-8794-9614c759cc4a","Type":"ContainerDied","Data":"728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9"} Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 13:11:11.054484 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 
13:11:11.055281 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-central-agent" containerID="cri-o://45378aef4707207b7cd3d01796a4bc93530779373bfc89ce26928606d883fbfd" gracePeriod=30 Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 13:11:11.055369 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="sg-core" containerID="cri-o://84d80d0863948447ae836620be1eed5ba0f1565ef90a24ee8d67d7ca64188925" gracePeriod=30 Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 13:11:11.055588 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-notification-agent" containerID="cri-o://f167a4225589d0c97e0ae61fa7fe7a7e54e1d9fefc0bf3caf406388d09da773b" gracePeriod=30 Jan 28 13:11:11 crc kubenswrapper[4848]: I0128 13:11:11.055340 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="proxy-httpd" containerID="cri-o://0630dfd2b8f0bdcd1b669dc518a719ccffca5993f11adf2d3c7cd8bb634633c0" gracePeriod=30 Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.066587 4848 generic.go:334] "Generic (PLEG): container finished" podID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerID="0630dfd2b8f0bdcd1b669dc518a719ccffca5993f11adf2d3c7cd8bb634633c0" exitCode=0 Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.067426 4848 generic.go:334] "Generic (PLEG): container finished" podID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerID="84d80d0863948447ae836620be1eed5ba0f1565ef90a24ee8d67d7ca64188925" exitCode=2 Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.067435 4848 generic.go:334] "Generic (PLEG): container finished" podID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerID="f167a4225589d0c97e0ae61fa7fe7a7e54e1d9fefc0bf3caf406388d09da773b" exitCode=0 Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.067422 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerDied","Data":"0630dfd2b8f0bdcd1b669dc518a719ccffca5993f11adf2d3c7cd8bb634633c0"} Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.067494 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerDied","Data":"84d80d0863948447ae836620be1eed5ba0f1565ef90a24ee8d67d7ca64188925"} Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.067512 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerDied","Data":"f167a4225589d0c97e0ae61fa7fe7a7e54e1d9fefc0bf3caf406388d09da773b"} Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.506442 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.632096 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-config-data\") pod \"b68aad89-78f2-4015-8794-9614c759cc4a\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.638836 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b68aad89-78f2-4015-8794-9614c759cc4a-logs\") pod \"b68aad89-78f2-4015-8794-9614c759cc4a\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.638885 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kjrr\" (UniqueName: \"kubernetes.io/projected/b68aad89-78f2-4015-8794-9614c759cc4a-kube-api-access-7kjrr\") pod \"b68aad89-78f2-4015-8794-9614c759cc4a\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.638945 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-combined-ca-bundle\") pod \"b68aad89-78f2-4015-8794-9614c759cc4a\" (UID: \"b68aad89-78f2-4015-8794-9614c759cc4a\") " Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.639606 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b68aad89-78f2-4015-8794-9614c759cc4a-logs" (OuterVolumeSpecName: "logs") pod "b68aad89-78f2-4015-8794-9614c759cc4a" (UID: "b68aad89-78f2-4015-8794-9614c759cc4a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.640010 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b68aad89-78f2-4015-8794-9614c759cc4a-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.644909 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b68aad89-78f2-4015-8794-9614c759cc4a-kube-api-access-7kjrr" (OuterVolumeSpecName: "kube-api-access-7kjrr") pod "b68aad89-78f2-4015-8794-9614c759cc4a" (UID: "b68aad89-78f2-4015-8794-9614c759cc4a"). InnerVolumeSpecName "kube-api-access-7kjrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.679520 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-config-data" (OuterVolumeSpecName: "config-data") pod "b68aad89-78f2-4015-8794-9614c759cc4a" (UID: "b68aad89-78f2-4015-8794-9614c759cc4a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.684347 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b68aad89-78f2-4015-8794-9614c759cc4a" (UID: "b68aad89-78f2-4015-8794-9614c759cc4a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.742645 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.742765 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kjrr\" (UniqueName: \"kubernetes.io/projected/b68aad89-78f2-4015-8794-9614c759cc4a-kube-api-access-7kjrr\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:12 crc kubenswrapper[4848]: I0128 13:11:12.742788 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68aad89-78f2-4015-8794-9614c759cc4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.102989 4848 generic.go:334] "Generic (PLEG): container finished" podID="b68aad89-78f2-4015-8794-9614c759cc4a" containerID="391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c" exitCode=0 Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.103083 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b68aad89-78f2-4015-8794-9614c759cc4a","Type":"ContainerDied","Data":"391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c"} Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.103170 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b68aad89-78f2-4015-8794-9614c759cc4a","Type":"ContainerDied","Data":"fd57e4ded9bb7bdc08952f0b5195a4930aec4b6b17b4595f742a9b5ca3bcd863"} Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.103195 4848 scope.go:117] "RemoveContainer" containerID="391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.103105 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.155418 4848 scope.go:117] "RemoveContainer" containerID="728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.155592 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.176950 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.191953 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:13 crc kubenswrapper[4848]: E0128 13:11:13.192557 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-log" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.192580 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-log" Jan 28 13:11:13 crc kubenswrapper[4848]: E0128 13:11:13.192592 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-api" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.192599 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-api" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.192822 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-log" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.192844 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" containerName="nova-api-api" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.194056 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.196600 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.201531 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.201640 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.221300 4848 scope.go:117] "RemoveContainer" containerID="391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c" Jan 28 13:11:13 crc kubenswrapper[4848]: E0128 13:11:13.226118 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c\": container with ID starting with 391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c not found: ID does not exist" containerID="391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.226167 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c"} err="failed to get container status \"391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c\": rpc error: code = NotFound desc = could not find container \"391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c\": container with ID starting with 391359a0b817b9d01aae8b7c30547da28a90bb30086c76b295dbe41dc7fe7e4c not found: ID does not exist" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.226197 4848 scope.go:117] "RemoveContainer" containerID="728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9" Jan 28 13:11:13 crc kubenswrapper[4848]: E0128 13:11:13.226888 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9\": container with ID starting with 728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9 not found: ID does not exist" containerID="728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.226917 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9"} err="failed to get container status \"728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9\": rpc error: code = NotFound desc = could not find container \"728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9\": container with ID starting with 728d47e1799a4300b736776ad9151c35325935eba1356a5d19be4458944aa3d9 not found: ID does not exist" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.234944 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.355326 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 
13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.355626 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-internal-tls-certs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.355684 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61944b33-413d-4448-9f84-f2a72079bf65-logs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.355728 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-config-data\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.355796 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sxqd\" (UniqueName: \"kubernetes.io/projected/61944b33-413d-4448-9f84-f2a72079bf65-kube-api-access-9sxqd\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.355817 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-public-tls-certs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.457819 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-config-data\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.457931 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sxqd\" (UniqueName: \"kubernetes.io/projected/61944b33-413d-4448-9f84-f2a72079bf65-kube-api-access-9sxqd\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.457954 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-public-tls-certs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.458044 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.458070 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.458104 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61944b33-413d-4448-9f84-f2a72079bf65-logs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.458654 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61944b33-413d-4448-9f84-f2a72079bf65-logs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.466189 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-public-tls-certs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.466191 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-internal-tls-certs\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.467195 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.473370 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-config-data\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.486383 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sxqd\" (UniqueName: \"kubernetes.io/projected/61944b33-413d-4448-9f84-f2a72079bf65-kube-api-access-9sxqd\") pod \"nova-api-0\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " pod="openstack/nova-api-0" Jan 28 13:11:13 crc kubenswrapper[4848]: I0128 13:11:13.530048 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:11:14 crc kubenswrapper[4848]: I0128 13:11:14.070538 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:14 crc kubenswrapper[4848]: I0128 13:11:14.115002 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"61944b33-413d-4448-9f84-f2a72079bf65","Type":"ContainerStarted","Data":"77aa1f0b14d0d77b166dbbe44351dad280140b4ce016fb4a4d305cd9e6d0b87a"} Jan 28 13:11:14 crc kubenswrapper[4848]: I0128 13:11:14.877344 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b68aad89-78f2-4015-8794-9614c759cc4a" path="/var/lib/kubelet/pods/b68aad89-78f2-4015-8794-9614c759cc4a/volumes" Jan 28 13:11:15 crc kubenswrapper[4848]: I0128 13:11:15.129446 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"61944b33-413d-4448-9f84-f2a72079bf65","Type":"ContainerStarted","Data":"8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3"} Jan 28 13:11:15 crc kubenswrapper[4848]: I0128 13:11:15.129501 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"61944b33-413d-4448-9f84-f2a72079bf65","Type":"ContainerStarted","Data":"c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f"} Jan 28 13:11:15 crc kubenswrapper[4848]: I0128 13:11:15.154005 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.153983073 podStartE2EDuration="2.153983073s" podCreationTimestamp="2026-01-28 13:11:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:15.152631686 +0000 UTC m=+1502.064848724" watchObservedRunningTime="2026-01-28 13:11:15.153983073 +0000 UTC m=+1502.066200111" Jan 28 13:11:15 crc kubenswrapper[4848]: I0128 13:11:15.380050 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:15 crc kubenswrapper[4848]: I0128 13:11:15.399522 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.163264 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.331098 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-vzjwq"] Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.332923 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.339029 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.339528 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.354656 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-vzjwq"] Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.438834 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-config-data\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.439459 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.440031 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-scripts\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.440276 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgxgm\" (UniqueName: \"kubernetes.io/projected/de8237ad-c0ce-4a60-a455-1ff2c36ad531-kube-api-access-xgxgm\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.542941 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-scripts\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.543080 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgxgm\" (UniqueName: \"kubernetes.io/projected/de8237ad-c0ce-4a60-a455-1ff2c36ad531-kube-api-access-xgxgm\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.543194 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-config-data\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.543241 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.550420 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.551268 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-config-data\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.554772 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-scripts\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.568773 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgxgm\" (UniqueName: \"kubernetes.io/projected/de8237ad-c0ce-4a60-a455-1ff2c36ad531-kube-api-access-xgxgm\") pod \"nova-cell1-cell-mapping-vzjwq\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:16 crc kubenswrapper[4848]: I0128 13:11:16.663573 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.156519 4848 generic.go:334] "Generic (PLEG): container finished" podID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerID="45378aef4707207b7cd3d01796a4bc93530779373bfc89ce26928606d883fbfd" exitCode=0 Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.156599 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerDied","Data":"45378aef4707207b7cd3d01796a4bc93530779373bfc89ce26928606d883fbfd"} Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.208136 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-vzjwq"] Jan 28 13:11:17 crc kubenswrapper[4848]: W0128 13:11:17.217705 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde8237ad_c0ce_4a60_a455_1ff2c36ad531.slice/crio-d068505edef09e6223bf5f0bcd4daf8409e30f8a842e387100d6fb261fba3550 WatchSource:0}: Error finding container d068505edef09e6223bf5f0bcd4daf8409e30f8a842e387100d6fb261fba3550: Status 404 returned error can't find the container with id d068505edef09e6223bf5f0bcd4daf8409e30f8a842e387100d6fb261fba3550 Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.227989 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.361890 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fr7j2\" (UniqueName: \"kubernetes.io/projected/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-kube-api-access-fr7j2\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362427 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-sg-core-conf-yaml\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362553 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-run-httpd\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362599 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-ceilometer-tls-certs\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362636 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-log-httpd\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362697 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-config-data\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362910 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-scripts\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362920 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.362958 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-combined-ca-bundle\") pod \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\" (UID: \"ddbe1b2a-5850-4549-a411-96f9c44c0a4f\") " Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.363081 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.363591 4848 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.363614 4848 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.367259 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-kube-api-access-fr7j2" (OuterVolumeSpecName: "kube-api-access-fr7j2") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "kube-api-access-fr7j2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.367968 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-scripts" (OuterVolumeSpecName: "scripts") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.393028 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.447807 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.465516 4848 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.465564 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.465580 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fr7j2\" (UniqueName: \"kubernetes.io/projected/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-kube-api-access-fr7j2\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.465593 4848 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.468595 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.520537 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-config-data" (OuterVolumeSpecName: "config-data") pod "ddbe1b2a-5850-4549-a411-96f9c44c0a4f" (UID: "ddbe1b2a-5850-4549-a411-96f9c44c0a4f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.568631 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.568713 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbe1b2a-5850-4549-a411-96f9c44c0a4f-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.727476 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.837084 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54bdc65d45-fctrd"] Jan 28 13:11:17 crc kubenswrapper[4848]: I0128 13:11:17.837390 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerName="dnsmasq-dns" containerID="cri-o://37e2d30577d02770a0a1e5f2df69b6c17b1b47617f5053830f24d6d5aa7c0241" gracePeriod=10 Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.173746 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vzjwq" event={"ID":"de8237ad-c0ce-4a60-a455-1ff2c36ad531","Type":"ContainerStarted","Data":"a2731cfa4f10d9756bd019e9f52666bfec9448bbc9b6769779d475d3784ca8af"} Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.174293 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vzjwq" event={"ID":"de8237ad-c0ce-4a60-a455-1ff2c36ad531","Type":"ContainerStarted","Data":"d068505edef09e6223bf5f0bcd4daf8409e30f8a842e387100d6fb261fba3550"} Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.178459 4848 generic.go:334] "Generic (PLEG): container finished" podID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerID="37e2d30577d02770a0a1e5f2df69b6c17b1b47617f5053830f24d6d5aa7c0241" exitCode=0 Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.178528 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" event={"ID":"86b85bd6-6e73-4373-b9bc-faea340b9e24","Type":"ContainerDied","Data":"37e2d30577d02770a0a1e5f2df69b6c17b1b47617f5053830f24d6d5aa7c0241"} Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.184407 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddbe1b2a-5850-4549-a411-96f9c44c0a4f","Type":"ContainerDied","Data":"c40fc714b09276d7ebe9a1ba6185b4df668e72c8edf2346efd918e08d1b28e73"} Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.184483 4848 scope.go:117] "RemoveContainer" containerID="0630dfd2b8f0bdcd1b669dc518a719ccffca5993f11adf2d3c7cd8bb634633c0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.184491 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.194810 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-vzjwq" podStartSLOduration=2.19478484 podStartE2EDuration="2.19478484s" podCreationTimestamp="2026-01-28 13:11:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:18.193128774 +0000 UTC m=+1505.105345812" watchObservedRunningTime="2026-01-28 13:11:18.19478484 +0000 UTC m=+1505.107001878" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.224580 4848 scope.go:117] "RemoveContainer" containerID="84d80d0863948447ae836620be1eed5ba0f1565ef90a24ee8d67d7ca64188925" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.259472 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.259971 4848 scope.go:117] "RemoveContainer" containerID="f167a4225589d0c97e0ae61fa7fe7a7e54e1d9fefc0bf3caf406388d09da773b" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.284920 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.301314 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:18 crc kubenswrapper[4848]: E0128 13:11:18.301974 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="proxy-httpd" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.301994 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="proxy-httpd" Jan 28 13:11:18 crc kubenswrapper[4848]: E0128 13:11:18.302014 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-notification-agent" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302023 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-notification-agent" Jan 28 13:11:18 crc kubenswrapper[4848]: E0128 13:11:18.302069 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="sg-core" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302078 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="sg-core" Jan 28 13:11:18 crc kubenswrapper[4848]: E0128 13:11:18.302095 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-central-agent" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302103 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-central-agent" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302723 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="proxy-httpd" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302753 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-notification-agent" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302772 4848 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="sg-core" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.302793 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" containerName="ceilometer-central-agent" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.305635 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.312901 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.315010 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.316024 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.318517 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.334930 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d27898c6-95da-4dfc-908d-36876a776c2d-run-httpd\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335060 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-config-data\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335210 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d27898c6-95da-4dfc-908d-36876a776c2d-log-httpd\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335537 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335606 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-scripts\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335630 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335668 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.335752 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-554zp\" (UniqueName: \"kubernetes.io/projected/d27898c6-95da-4dfc-908d-36876a776c2d-kube-api-access-554zp\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.358555 4848 scope.go:117] "RemoveContainer" containerID="45378aef4707207b7cd3d01796a4bc93530779373bfc89ce26928606d883fbfd" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437665 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d27898c6-95da-4dfc-908d-36876a776c2d-log-httpd\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437729 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437760 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-scripts\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437783 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437799 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437886 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-554zp\" (UniqueName: \"kubernetes.io/projected/d27898c6-95da-4dfc-908d-36876a776c2d-kube-api-access-554zp\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437931 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d27898c6-95da-4dfc-908d-36876a776c2d-run-httpd\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.437997 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-config-data\") pod \"ceilometer-0\" (UID: 
\"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.439963 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d27898c6-95da-4dfc-908d-36876a776c2d-run-httpd\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.440573 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d27898c6-95da-4dfc-908d-36876a776c2d-log-httpd\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.448041 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.448165 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-scripts\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.448267 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.448447 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-config-data\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.452107 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27898c6-95da-4dfc-908d-36876a776c2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.460759 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-554zp\" (UniqueName: \"kubernetes.io/projected/d27898c6-95da-4dfc-908d-36876a776c2d-kube-api-access-554zp\") pod \"ceilometer-0\" (UID: \"d27898c6-95da-4dfc-908d-36876a776c2d\") " pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.604618 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.641755 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-config\") pod \"86b85bd6-6e73-4373-b9bc-faea340b9e24\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.642532 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-swift-storage-0\") pod \"86b85bd6-6e73-4373-b9bc-faea340b9e24\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.642579 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-svc\") pod \"86b85bd6-6e73-4373-b9bc-faea340b9e24\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.642645 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-sb\") pod \"86b85bd6-6e73-4373-b9bc-faea340b9e24\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.642783 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-nb\") pod \"86b85bd6-6e73-4373-b9bc-faea340b9e24\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.642983 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlbfs\" (UniqueName: \"kubernetes.io/projected/86b85bd6-6e73-4373-b9bc-faea340b9e24-kube-api-access-rlbfs\") pod \"86b85bd6-6e73-4373-b9bc-faea340b9e24\" (UID: \"86b85bd6-6e73-4373-b9bc-faea340b9e24\") " Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.648410 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.677582 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86b85bd6-6e73-4373-b9bc-faea340b9e24-kube-api-access-rlbfs" (OuterVolumeSpecName: "kube-api-access-rlbfs") pod "86b85bd6-6e73-4373-b9bc-faea340b9e24" (UID: "86b85bd6-6e73-4373-b9bc-faea340b9e24"). InnerVolumeSpecName "kube-api-access-rlbfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.739597 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "86b85bd6-6e73-4373-b9bc-faea340b9e24" (UID: "86b85bd6-6e73-4373-b9bc-faea340b9e24"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.742361 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "86b85bd6-6e73-4373-b9bc-faea340b9e24" (UID: "86b85bd6-6e73-4373-b9bc-faea340b9e24"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.749047 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.749117 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.749156 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlbfs\" (UniqueName: \"kubernetes.io/projected/86b85bd6-6e73-4373-b9bc-faea340b9e24-kube-api-access-rlbfs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.755279 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "86b85bd6-6e73-4373-b9bc-faea340b9e24" (UID: "86b85bd6-6e73-4373-b9bc-faea340b9e24"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.766836 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "86b85bd6-6e73-4373-b9bc-faea340b9e24" (UID: "86b85bd6-6e73-4373-b9bc-faea340b9e24"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.808390 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-config" (OuterVolumeSpecName: "config") pod "86b85bd6-6e73-4373-b9bc-faea340b9e24" (UID: "86b85bd6-6e73-4373-b9bc-faea340b9e24"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.851045 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.851096 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.851116 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86b85bd6-6e73-4373-b9bc-faea340b9e24-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:18 crc kubenswrapper[4848]: I0128 13:11:18.863424 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddbe1b2a-5850-4549-a411-96f9c44c0a4f" path="/var/lib/kubelet/pods/ddbe1b2a-5850-4549-a411-96f9c44c0a4f/volumes" Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.186051 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.214609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" event={"ID":"86b85bd6-6e73-4373-b9bc-faea340b9e24","Type":"ContainerDied","Data":"43c04236108e02f321921b522e2b9fc752cecdfb178e794d01c48b16b1812f35"} Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.214695 4848 scope.go:117] "RemoveContainer" containerID="37e2d30577d02770a0a1e5f2df69b6c17b1b47617f5053830f24d6d5aa7c0241" Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.214862 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54bdc65d45-fctrd" Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.287663 4848 scope.go:117] "RemoveContainer" containerID="8550601ae5964770372b7e5e5b357451d9b54d55944984d2a9ae3baef64b05f5" Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.291462 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54bdc65d45-fctrd"] Jan 28 13:11:19 crc kubenswrapper[4848]: I0128 13:11:19.303629 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54bdc65d45-fctrd"] Jan 28 13:11:20 crc kubenswrapper[4848]: I0128 13:11:20.245085 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d27898c6-95da-4dfc-908d-36876a776c2d","Type":"ContainerStarted","Data":"3f65f990bb9614397f1cbab28982935eef67eaf0afbf20aa5b04716282199cef"} Jan 28 13:11:20 crc kubenswrapper[4848]: I0128 13:11:20.245479 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d27898c6-95da-4dfc-908d-36876a776c2d","Type":"ContainerStarted","Data":"6dec3ca24fa731134fd89fd23f7ec8c8acfcf3118046d7dd7e3faf30c692bdcc"} Jan 28 13:11:20 crc kubenswrapper[4848]: I0128 13:11:20.245492 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d27898c6-95da-4dfc-908d-36876a776c2d","Type":"ContainerStarted","Data":"a8a90deca58e58b021521bfa85b96337594e7de9052594603f735c6da8a0640b"} Jan 28 13:11:20 crc kubenswrapper[4848]: I0128 13:11:20.863046 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" path="/var/lib/kubelet/pods/86b85bd6-6e73-4373-b9bc-faea340b9e24/volumes" Jan 28 13:11:21 crc kubenswrapper[4848]: I0128 13:11:21.262487 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d27898c6-95da-4dfc-908d-36876a776c2d","Type":"ContainerStarted","Data":"eb0c47455b3e64e352a7b6d90af21046eec100595921dac5e219a06c2c27e88a"} Jan 28 13:11:23 crc kubenswrapper[4848]: I0128 13:11:23.296840 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d27898c6-95da-4dfc-908d-36876a776c2d","Type":"ContainerStarted","Data":"823bd410635be2e89e44fd8098d70afc551f0ce5c5b9c18f02483f76d830299f"} Jan 28 13:11:23 crc kubenswrapper[4848]: I0128 13:11:23.297011 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 13:11:23 crc kubenswrapper[4848]: I0128 13:11:23.346420 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.475157414 podStartE2EDuration="5.346397953s" podCreationTimestamp="2026-01-28 13:11:18 +0000 UTC" firstStartedPulling="2026-01-28 13:11:19.199969721 +0000 UTC m=+1506.112186779" lastFinishedPulling="2026-01-28 13:11:22.07121028 +0000 UTC m=+1508.983427318" observedRunningTime="2026-01-28 13:11:23.332788168 +0000 UTC m=+1510.245005226" watchObservedRunningTime="2026-01-28 13:11:23.346397953 +0000 UTC m=+1510.258614991" Jan 28 13:11:23 crc kubenswrapper[4848]: I0128 13:11:23.531320 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 13:11:23 crc kubenswrapper[4848]: I0128 13:11:23.531388 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 13:11:24 crc kubenswrapper[4848]: I0128 13:11:24.309979 4848 generic.go:334] "Generic (PLEG): container finished" 
podID="de8237ad-c0ce-4a60-a455-1ff2c36ad531" containerID="a2731cfa4f10d9756bd019e9f52666bfec9448bbc9b6769779d475d3784ca8af" exitCode=0 Jan 28 13:11:24 crc kubenswrapper[4848]: I0128 13:11:24.310083 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vzjwq" event={"ID":"de8237ad-c0ce-4a60-a455-1ff2c36ad531","Type":"ContainerDied","Data":"a2731cfa4f10d9756bd019e9f52666bfec9448bbc9b6769779d475d3784ca8af"} Jan 28 13:11:24 crc kubenswrapper[4848]: I0128 13:11:24.550698 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.221:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 13:11:24 crc kubenswrapper[4848]: I0128 13:11:24.550721 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.221:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.754842 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.760478 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-config-data\") pod \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.760818 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-combined-ca-bundle\") pod \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.760923 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-scripts\") pod \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.760977 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgxgm\" (UniqueName: \"kubernetes.io/projected/de8237ad-c0ce-4a60-a455-1ff2c36ad531-kube-api-access-xgxgm\") pod \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\" (UID: \"de8237ad-c0ce-4a60-a455-1ff2c36ad531\") " Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.768961 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-scripts" (OuterVolumeSpecName: "scripts") pod "de8237ad-c0ce-4a60-a455-1ff2c36ad531" (UID: "de8237ad-c0ce-4a60-a455-1ff2c36ad531"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.770979 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de8237ad-c0ce-4a60-a455-1ff2c36ad531-kube-api-access-xgxgm" (OuterVolumeSpecName: "kube-api-access-xgxgm") pod "de8237ad-c0ce-4a60-a455-1ff2c36ad531" (UID: "de8237ad-c0ce-4a60-a455-1ff2c36ad531"). 
InnerVolumeSpecName "kube-api-access-xgxgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.828149 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-config-data" (OuterVolumeSpecName: "config-data") pod "de8237ad-c0ce-4a60-a455-1ff2c36ad531" (UID: "de8237ad-c0ce-4a60-a455-1ff2c36ad531"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.857454 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de8237ad-c0ce-4a60-a455-1ff2c36ad531" (UID: "de8237ad-c0ce-4a60-a455-1ff2c36ad531"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.866775 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.866819 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.866832 4848 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de8237ad-c0ce-4a60-a455-1ff2c36ad531-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:25 crc kubenswrapper[4848]: I0128 13:11:25.866844 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgxgm\" (UniqueName: \"kubernetes.io/projected/de8237ad-c0ce-4a60-a455-1ff2c36ad531-kube-api-access-xgxgm\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.339662 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vzjwq" Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.339533 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vzjwq" event={"ID":"de8237ad-c0ce-4a60-a455-1ff2c36ad531","Type":"ContainerDied","Data":"d068505edef09e6223bf5f0bcd4daf8409e30f8a842e387100d6fb261fba3550"} Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.340930 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d068505edef09e6223bf5f0bcd4daf8409e30f8a842e387100d6fb261fba3550" Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.554703 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.555063 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-log" containerID="cri-o://c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f" gracePeriod=30 Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.555193 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-api" containerID="cri-o://8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3" gracePeriod=30 Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.582578 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.582986 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerName="nova-scheduler-scheduler" containerID="cri-o://0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" gracePeriod=30 Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.622310 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.622585 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-log" containerID="cri-o://07b290654e2289aa63a23d07a3307330e7aaf26616ee57256f1df9eef41380c8" gracePeriod=30 Jan 28 13:11:26 crc kubenswrapper[4848]: I0128 13:11:26.623011 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-metadata" containerID="cri-o://82295e29aef7c2f2a5b64a93dde3154d43cb7845184884fd0e2fab317ceb219a" gracePeriod=30 Jan 28 13:11:27 crc kubenswrapper[4848]: E0128 13:11:27.282766 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 13:11:27 crc kubenswrapper[4848]: E0128 13:11:27.286746 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 13:11:27 crc kubenswrapper[4848]: E0128 13:11:27.288058 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 13:11:27 crc kubenswrapper[4848]: E0128 13:11:27.288091 4848 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerName="nova-scheduler-scheduler" Jan 28 13:11:27 crc kubenswrapper[4848]: I0128 13:11:27.361152 4848 generic.go:334] "Generic (PLEG): container finished" podID="61944b33-413d-4448-9f84-f2a72079bf65" containerID="c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f" exitCode=143 Jan 28 13:11:27 crc kubenswrapper[4848]: I0128 13:11:27.361261 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"61944b33-413d-4448-9f84-f2a72079bf65","Type":"ContainerDied","Data":"c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f"} Jan 28 13:11:27 crc kubenswrapper[4848]: I0128 13:11:27.365382 4848 generic.go:334] "Generic (PLEG): container finished" podID="10e136b8-b94a-47d4-ad21-0646f193e596" containerID="07b290654e2289aa63a23d07a3307330e7aaf26616ee57256f1df9eef41380c8" exitCode=143 Jan 28 13:11:27 crc kubenswrapper[4848]: I0128 13:11:27.365453 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10e136b8-b94a-47d4-ad21-0646f193e596","Type":"ContainerDied","Data":"07b290654e2289aa63a23d07a3307330e7aaf26616ee57256f1df9eef41380c8"} Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.391315 4848 generic.go:334] "Generic (PLEG): container finished" podID="10e136b8-b94a-47d4-ad21-0646f193e596" containerID="82295e29aef7c2f2a5b64a93dde3154d43cb7845184884fd0e2fab317ceb219a" exitCode=0 Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.391418 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10e136b8-b94a-47d4-ad21-0646f193e596","Type":"ContainerDied","Data":"82295e29aef7c2f2a5b64a93dde3154d43cb7845184884fd0e2fab317ceb219a"} Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.685766 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.741578 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzdqn\" (UniqueName: \"kubernetes.io/projected/10e136b8-b94a-47d4-ad21-0646f193e596-kube-api-access-vzdqn\") pod \"10e136b8-b94a-47d4-ad21-0646f193e596\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.741648 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-config-data\") pod \"10e136b8-b94a-47d4-ad21-0646f193e596\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.741704 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10e136b8-b94a-47d4-ad21-0646f193e596-logs\") pod \"10e136b8-b94a-47d4-ad21-0646f193e596\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.741773 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-nova-metadata-tls-certs\") pod \"10e136b8-b94a-47d4-ad21-0646f193e596\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.741838 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-combined-ca-bundle\") pod \"10e136b8-b94a-47d4-ad21-0646f193e596\" (UID: \"10e136b8-b94a-47d4-ad21-0646f193e596\") " Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.742712 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10e136b8-b94a-47d4-ad21-0646f193e596-logs" (OuterVolumeSpecName: "logs") pod "10e136b8-b94a-47d4-ad21-0646f193e596" (UID: "10e136b8-b94a-47d4-ad21-0646f193e596"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.767591 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10e136b8-b94a-47d4-ad21-0646f193e596-kube-api-access-vzdqn" (OuterVolumeSpecName: "kube-api-access-vzdqn") pod "10e136b8-b94a-47d4-ad21-0646f193e596" (UID: "10e136b8-b94a-47d4-ad21-0646f193e596"). InnerVolumeSpecName "kube-api-access-vzdqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.775201 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10e136b8-b94a-47d4-ad21-0646f193e596" (UID: "10e136b8-b94a-47d4-ad21-0646f193e596"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.805709 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-config-data" (OuterVolumeSpecName: "config-data") pod "10e136b8-b94a-47d4-ad21-0646f193e596" (UID: "10e136b8-b94a-47d4-ad21-0646f193e596"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.825801 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "10e136b8-b94a-47d4-ad21-0646f193e596" (UID: "10e136b8-b94a-47d4-ad21-0646f193e596"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.848329 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzdqn\" (UniqueName: \"kubernetes.io/projected/10e136b8-b94a-47d4-ad21-0646f193e596-kube-api-access-vzdqn\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.848388 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.848403 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10e136b8-b94a-47d4-ad21-0646f193e596-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.848416 4848 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:28 crc kubenswrapper[4848]: I0128 13:11:28.848427 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10e136b8-b94a-47d4-ad21-0646f193e596-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.408382 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10e136b8-b94a-47d4-ad21-0646f193e596","Type":"ContainerDied","Data":"60fe11837a6a881304b265345203fa544e70c316b2300aaa5c7e55b46dc29f10"} Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.408467 4848 scope.go:117] "RemoveContainer" containerID="82295e29aef7c2f2a5b64a93dde3154d43cb7845184884fd0e2fab317ceb219a" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.408481 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.452311 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.455277 4848 scope.go:117] "RemoveContainer" containerID="07b290654e2289aa63a23d07a3307330e7aaf26616ee57256f1df9eef41380c8" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.474331 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.490460 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:11:29 crc kubenswrapper[4848]: E0128 13:11:29.491025 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-log" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491045 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-log" Jan 28 13:11:29 crc kubenswrapper[4848]: E0128 13:11:29.491059 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de8237ad-c0ce-4a60-a455-1ff2c36ad531" containerName="nova-manage" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491065 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="de8237ad-c0ce-4a60-a455-1ff2c36ad531" containerName="nova-manage" Jan 28 13:11:29 crc kubenswrapper[4848]: E0128 13:11:29.491076 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerName="init" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491083 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerName="init" Jan 28 13:11:29 crc kubenswrapper[4848]: E0128 13:11:29.491095 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerName="dnsmasq-dns" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491102 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerName="dnsmasq-dns" Jan 28 13:11:29 crc kubenswrapper[4848]: E0128 13:11:29.491117 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-metadata" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491124 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-metadata" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491378 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="86b85bd6-6e73-4373-b9bc-faea340b9e24" containerName="dnsmasq-dns" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491395 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-metadata" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491423 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="de8237ad-c0ce-4a60-a455-1ff2c36ad531" containerName="nova-manage" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.491434 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" containerName="nova-metadata-log" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.492727 4848 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.496416 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.496935 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.530908 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.565655 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsv74\" (UniqueName: \"kubernetes.io/projected/6c953264-454e-4949-906c-25378e467ab4-kube-api-access-xsv74\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.565730 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c953264-454e-4949-906c-25378e467ab4-logs\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.565763 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-config-data\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.565799 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.565877 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.667834 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsv74\" (UniqueName: \"kubernetes.io/projected/6c953264-454e-4949-906c-25378e467ab4-kube-api-access-xsv74\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.667918 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c953264-454e-4949-906c-25378e467ab4-logs\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.667961 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-config-data\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " 
pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.668011 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.668083 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.668625 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c953264-454e-4949-906c-25378e467ab4-logs\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.677140 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-config-data\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.679268 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.680918 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c953264-454e-4949-906c-25378e467ab4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.687780 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsv74\" (UniqueName: \"kubernetes.io/projected/6c953264-454e-4949-906c-25378e467ab4-kube-api-access-xsv74\") pod \"nova-metadata-0\" (UID: \"6c953264-454e-4949-906c-25378e467ab4\") " pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.873955 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.929703 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.984148 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sxqd\" (UniqueName: \"kubernetes.io/projected/61944b33-413d-4448-9f84-f2a72079bf65-kube-api-access-9sxqd\") pod \"61944b33-413d-4448-9f84-f2a72079bf65\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.984379 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-internal-tls-certs\") pod \"61944b33-413d-4448-9f84-f2a72079bf65\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.984457 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-public-tls-certs\") pod \"61944b33-413d-4448-9f84-f2a72079bf65\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.985399 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-config-data\") pod \"61944b33-413d-4448-9f84-f2a72079bf65\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.985522 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61944b33-413d-4448-9f84-f2a72079bf65-logs\") pod \"61944b33-413d-4448-9f84-f2a72079bf65\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.985701 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-combined-ca-bundle\") pod \"61944b33-413d-4448-9f84-f2a72079bf65\" (UID: \"61944b33-413d-4448-9f84-f2a72079bf65\") " Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.987765 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61944b33-413d-4448-9f84-f2a72079bf65-logs" (OuterVolumeSpecName: "logs") pod "61944b33-413d-4448-9f84-f2a72079bf65" (UID: "61944b33-413d-4448-9f84-f2a72079bf65"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:11:29 crc kubenswrapper[4848]: I0128 13:11:29.994256 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61944b33-413d-4448-9f84-f2a72079bf65-kube-api-access-9sxqd" (OuterVolumeSpecName: "kube-api-access-9sxqd") pod "61944b33-413d-4448-9f84-f2a72079bf65" (UID: "61944b33-413d-4448-9f84-f2a72079bf65"). InnerVolumeSpecName "kube-api-access-9sxqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.026813 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-config-data" (OuterVolumeSpecName: "config-data") pod "61944b33-413d-4448-9f84-f2a72079bf65" (UID: "61944b33-413d-4448-9f84-f2a72079bf65"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.041855 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61944b33-413d-4448-9f84-f2a72079bf65" (UID: "61944b33-413d-4448-9f84-f2a72079bf65"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.072783 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "61944b33-413d-4448-9f84-f2a72079bf65" (UID: "61944b33-413d-4448-9f84-f2a72079bf65"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.080301 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "61944b33-413d-4448-9f84-f2a72079bf65" (UID: "61944b33-413d-4448-9f84-f2a72079bf65"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.088938 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sxqd\" (UniqueName: \"kubernetes.io/projected/61944b33-413d-4448-9f84-f2a72079bf65-kube-api-access-9sxqd\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.088968 4848 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.088982 4848 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.088992 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.089002 4848 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61944b33-413d-4448-9f84-f2a72079bf65-logs\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.089012 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61944b33-413d-4448-9f84-f2a72079bf65-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.365162 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 13:11:30 crc kubenswrapper[4848]: W0128 13:11:30.377112 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c953264_454e_4949_906c_25378e467ab4.slice/crio-02b606e059993ddb23030f03f39aa27e2712a7eb1e8fc46d132d3e49b009c372 WatchSource:0}: Error finding container 
Jan 28 13:11:30 crc kubenswrapper[4848]: W0128 13:11:30.377112 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c953264_454e_4949_906c_25378e467ab4.slice/crio-02b606e059993ddb23030f03f39aa27e2712a7eb1e8fc46d132d3e49b009c372 WatchSource:0}: Error finding container 02b606e059993ddb23030f03f39aa27e2712a7eb1e8fc46d132d3e49b009c372: Status 404 returned error can't find the container with id 02b606e059993ddb23030f03f39aa27e2712a7eb1e8fc46d132d3e49b009c372
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.433888 4848 generic.go:334] "Generic (PLEG): container finished" podID="61944b33-413d-4448-9f84-f2a72079bf65" containerID="8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3" exitCode=0
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.433976 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.433968 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"61944b33-413d-4448-9f84-f2a72079bf65","Type":"ContainerDied","Data":"8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3"}
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.434117 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"61944b33-413d-4448-9f84-f2a72079bf65","Type":"ContainerDied","Data":"77aa1f0b14d0d77b166dbbe44351dad280140b4ce016fb4a4d305cd9e6d0b87a"}
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.434147 4848 scope.go:117] "RemoveContainer" containerID="8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.438696 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6c953264-454e-4949-906c-25378e467ab4","Type":"ContainerStarted","Data":"02b606e059993ddb23030f03f39aa27e2712a7eb1e8fc46d132d3e49b009c372"}
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.490522 4848 scope.go:117] "RemoveContainer" containerID="c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.507964 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.581687 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.617791 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:11:30 crc kubenswrapper[4848]: E0128 13:11:30.618650 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-api"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.618677 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-api"
Jan 28 13:11:30 crc kubenswrapper[4848]: E0128 13:11:30.618711 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-log"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.618720 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-log"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.618977 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-log"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.619005 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="61944b33-413d-4448-9f84-f2a72079bf65" containerName="nova-api-api"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.620677 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.631945 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.632214 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.632407 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.698322 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.716986 4848 scope.go:117] "RemoveContainer" containerID="8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3"
Jan 28 13:11:30 crc kubenswrapper[4848]: E0128 13:11:30.717730 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3\": container with ID starting with 8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3 not found: ID does not exist" containerID="8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.717773 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3"} err="failed to get container status \"8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3\": rpc error: code = NotFound desc = could not find container \"8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3\": container with ID starting with 8ae35cd15182bebbd517a39380a03db35eceac0df772e7bb8445574fe687bee3 not found: ID does not exist"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.717810 4848 scope.go:117] "RemoveContainer" containerID="c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f"
Jan 28 13:11:30 crc kubenswrapper[4848]: E0128 13:11:30.718700 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f\": container with ID starting with c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f not found: ID does not exist" containerID="c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f"
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.718732 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f"} err="failed to get container status \"c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f\": rpc error: code = NotFound desc = could not find container \"c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f\": container with ID starting with c0d09cf9585858068db19a3cb0784df432e1f7e40db07c0a01ac0555a584971f not found: ID does not exist"
\"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.721537 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-config-data\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.721652 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-public-tls-certs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.721792 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.721869 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-internal-tls-certs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.721933 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-logs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.824918 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-logs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.825647 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6zwn\" (UniqueName: \"kubernetes.io/projected/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-kube-api-access-h6zwn\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.825693 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-logs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.825708 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-config-data\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.826186 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-public-tls-certs\") pod 
\"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.827665 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.827795 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-internal-tls-certs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.834419 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-config-data\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.835153 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-internal-tls-certs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.835533 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-public-tls-certs\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.838412 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.846981 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6zwn\" (UniqueName: \"kubernetes.io/projected/68a7a2b4-9e0a-410e-b131-6bf39b7ffa35-kube-api-access-h6zwn\") pod \"nova-api-0\" (UID: \"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35\") " pod="openstack/nova-api-0" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.863606 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10e136b8-b94a-47d4-ad21-0646f193e596" path="/var/lib/kubelet/pods/10e136b8-b94a-47d4-ad21-0646f193e596/volumes" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.864704 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61944b33-413d-4448-9f84-f2a72079bf65" path="/var/lib/kubelet/pods/61944b33-413d-4448-9f84-f2a72079bf65/volumes" Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.999140 4848 util.go:30] "No sandbox for pod can be found. 
Jan 28 13:11:30 crc kubenswrapper[4848]: I0128 13:11:30.999140 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 13:11:31 crc kubenswrapper[4848]: I0128 13:11:31.453324 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6c953264-454e-4949-906c-25378e467ab4","Type":"ContainerStarted","Data":"ed575fcd359b003c62d94f5706815709e2510844c94bee34d29671322ce6f986"}
Jan 28 13:11:31 crc kubenswrapper[4848]: I0128 13:11:31.453821 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6c953264-454e-4949-906c-25378e467ab4","Type":"ContainerStarted","Data":"f201cb9fa219bf75b29da3ab9197a3e8d96f35e0abb34b4963e9e21744e69d06"}
Jan 28 13:11:31 crc kubenswrapper[4848]: I0128 13:11:31.487577 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.487552102 podStartE2EDuration="2.487552102s" podCreationTimestamp="2026-01-28 13:11:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:31.477099414 +0000 UTC m=+1518.389316462" watchObservedRunningTime="2026-01-28 13:11:31.487552102 +0000 UTC m=+1518.399769140"
Jan 28 13:11:31 crc kubenswrapper[4848]: W0128 13:11:31.570589 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68a7a2b4_9e0a_410e_b131_6bf39b7ffa35.slice/crio-8cea657bf33863404808e471ee7db51120de29c3d3eda76bad961c46519c5d4f WatchSource:0}: Error finding container 8cea657bf33863404808e471ee7db51120de29c3d3eda76bad961c46519c5d4f: Status 404 returned error can't find the container with id 8cea657bf33863404808e471ee7db51120de29c3d3eda76bad961c46519c5d4f
Jan 28 13:11:31 crc kubenswrapper[4848]: I0128 13:11:31.575602 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 13:11:32 crc kubenswrapper[4848]: E0128 13:11:32.280211 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a is running failed: container process not found" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 28 13:11:32 crc kubenswrapper[4848]: E0128 13:11:32.280935 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a is running failed: container process not found" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 28 13:11:32 crc kubenswrapper[4848]: E0128 13:11:32.281508 4848 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a is running failed: container process not found" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
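The "ExecSync cmd from runtime service failed" errors above show how the nova-scheduler readiness probe works: the kubelet execs /usr/bin/pgrep -r DRST nova-scheduler inside the container through the CRI ExecSync call, and once the container process is gone the exec itself fails with NotFound. Roughly what that probe command checks, reimplemented with os/exec (assumes a Linux host with procps pgrep installed; purely illustrative):

package main

import (
	"fmt"
	"os/exec"
)

// probe mimics the exec readiness probe from the log: pgrep -r DRST matches
// "nova-scheduler" processes in the running (R), sleeping (S), disk-wait (D),
// or stopped (T) states. Exit code 0 means at least one match, i.e. ready.
func probe() error {
	out, err := exec.Command("/usr/bin/pgrep", "-r", "DRST", "nova-scheduler").CombinedOutput()
	if err != nil {
		return fmt.Errorf("probe failed: %w (output: %s)", err, out)
	}
	return nil
}

func main() {
	if err := probe(); err != nil {
		fmt.Println("not ready:", err) // expected unless nova-scheduler runs locally
		return
	}
	fmt.Println("ready")
}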
Jan 28 13:11:32 crc kubenswrapper[4848]: E0128 13:11:32.281552 4848 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerName="nova-scheduler-scheduler"
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.485538 4848 generic.go:334] "Generic (PLEG): container finished" podID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a" exitCode=0
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.487372 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aea94d4-8c5e-4305-85c8-bdacebcf990e","Type":"ContainerDied","Data":"0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a"}
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.492209 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35","Type":"ContainerStarted","Data":"6a0635560ae9b48eb30f557bddd49853301167a4dac028fa0e1cfc35073ffb21"}
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.492311 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35","Type":"ContainerStarted","Data":"5e76de2538ab29ccdad29b80dd950adad75168f8886c7db752ba17776658e205"}
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.492335 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"68a7a2b4-9e0a-410e-b131-6bf39b7ffa35","Type":"ContainerStarted","Data":"8cea657bf33863404808e471ee7db51120de29c3d3eda76bad961c46519c5d4f"}
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.524658 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.52463225 podStartE2EDuration="2.52463225s" podCreationTimestamp="2026-01-28 13:11:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:32.519199521 +0000 UTC m=+1519.431416569" watchObservedRunningTime="2026-01-28 13:11:32.52463225 +0000 UTC m=+1519.436849288"
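The podStartSLOduration figures are plain arithmetic over the timestamps in the same entry: with both image-pull timestamps at their zero value (no pull happened), the duration is watchObservedRunningTime minus podCreationTimestamp. For nova-api-0 that is 13:11:32.52463225 - 13:11:30 = 2.52463225 s, exactly the logged value; nova-metadata-0 above works out the same way to 2.487552102 s. The same computation in Go, with the two timestamps copied from the log:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-28 13:11:30 +0000 UTC")
	observed, _ := time.Parse(layout, "2026-01-28 13:11:32.52463225 +0000 UTC")
	// No image pull was recorded (pulling timestamps are the zero value),
	// so nothing is subtracted from the end-to-end duration.
	fmt.Println(observed.Sub(created)) // 2.52463225s
}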
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.617341 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.690378 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txnl2\" (UniqueName: \"kubernetes.io/projected/9aea94d4-8c5e-4305-85c8-bdacebcf990e-kube-api-access-txnl2\") pod \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") "
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.690435 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-config-data\") pod \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") "
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.690633 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-combined-ca-bundle\") pod \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\" (UID: \"9aea94d4-8c5e-4305-85c8-bdacebcf990e\") "
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.702514 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9aea94d4-8c5e-4305-85c8-bdacebcf990e-kube-api-access-txnl2" (OuterVolumeSpecName: "kube-api-access-txnl2") pod "9aea94d4-8c5e-4305-85c8-bdacebcf990e" (UID: "9aea94d4-8c5e-4305-85c8-bdacebcf990e"). InnerVolumeSpecName "kube-api-access-txnl2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.726254 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-config-data" (OuterVolumeSpecName: "config-data") pod "9aea94d4-8c5e-4305-85c8-bdacebcf990e" (UID: "9aea94d4-8c5e-4305-85c8-bdacebcf990e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.731357 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9aea94d4-8c5e-4305-85c8-bdacebcf990e" (UID: "9aea94d4-8c5e-4305-85c8-bdacebcf990e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.792946 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txnl2\" (UniqueName: \"kubernetes.io/projected/9aea94d4-8c5e-4305-85c8-bdacebcf990e-kube-api-access-txnl2\") on node \"crc\" DevicePath \"\""
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.792988 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 13:11:32 crc kubenswrapper[4848]: I0128 13:11:32.793002 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aea94d4-8c5e-4305-85c8-bdacebcf990e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.503563 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aea94d4-8c5e-4305-85c8-bdacebcf990e","Type":"ContainerDied","Data":"f3fb325256d72bcdf77faee22072bbd5fab3af347a7676436ca4c13dc649f8b1"}
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.503585 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.503984 4848 scope.go:117] "RemoveContainer" containerID="0ea0a25d7c828f9095eb8bdb6e755e17020729c1f06b6e8e07552082f323a07a"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.546424 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.559451 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.570032 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:11:33 crc kubenswrapper[4848]: E0128 13:11:33.570532 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerName="nova-scheduler-scheduler"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.570552 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerName="nova-scheduler-scheduler"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.570773 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" containerName="nova-scheduler-scheduler"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.571573 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.578446 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.581457 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.711959 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725cd16a-296a-485a-9d15-df106a2c6ebc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.712175 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxsnd\" (UniqueName: \"kubernetes.io/projected/725cd16a-296a-485a-9d15-df106a2c6ebc-kube-api-access-dxsnd\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.712234 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725cd16a-296a-485a-9d15-df106a2c6ebc-config-data\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.814853 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725cd16a-296a-485a-9d15-df106a2c6ebc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.814958 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxsnd\" (UniqueName: \"kubernetes.io/projected/725cd16a-296a-485a-9d15-df106a2c6ebc-kube-api-access-dxsnd\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.814984 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725cd16a-296a-485a-9d15-df106a2c6ebc-config-data\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.829078 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725cd16a-296a-485a-9d15-df106a2c6ebc-config-data\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.833586 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725cd16a-296a-485a-9d15-df106a2c6ebc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.836237 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxsnd\" (UniqueName: \"kubernetes.io/projected/725cd16a-296a-485a-9d15-df106a2c6ebc-kube-api-access-dxsnd\") pod \"nova-scheduler-0\" (UID: \"725cd16a-296a-485a-9d15-df106a2c6ebc\") " pod="openstack/nova-scheduler-0"
Jan 28 13:11:33 crc kubenswrapper[4848]: I0128 13:11:33.903024 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 28 13:11:34 crc kubenswrapper[4848]: I0128 13:11:34.370181 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 13:11:34 crc kubenswrapper[4848]: W0128 13:11:34.373995 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod725cd16a_296a_485a_9d15_df106a2c6ebc.slice/crio-024649b7ede8b7a114faf6f7b48ae60594280ba171e31a50bbe365e9aec3287f WatchSource:0}: Error finding container 024649b7ede8b7a114faf6f7b48ae60594280ba171e31a50bbe365e9aec3287f: Status 404 returned error can't find the container with id 024649b7ede8b7a114faf6f7b48ae60594280ba171e31a50bbe365e9aec3287f
Jan 28 13:11:34 crc kubenswrapper[4848]: I0128 13:11:34.516647 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"725cd16a-296a-485a-9d15-df106a2c6ebc","Type":"ContainerStarted","Data":"024649b7ede8b7a114faf6f7b48ae60594280ba171e31a50bbe365e9aec3287f"}
Jan 28 13:11:34 crc kubenswrapper[4848]: I0128 13:11:34.864374 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9aea94d4-8c5e-4305-85c8-bdacebcf990e" path="/var/lib/kubelet/pods/9aea94d4-8c5e-4305-85c8-bdacebcf990e/volumes"
Jan 28 13:11:34 crc kubenswrapper[4848]: I0128 13:11:34.874910 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 28 13:11:34 crc kubenswrapper[4848]: I0128 13:11:34.875009 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 28 13:11:35 crc kubenswrapper[4848]: I0128 13:11:35.531462 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"725cd16a-296a-485a-9d15-df106a2c6ebc","Type":"ContainerStarted","Data":"2d7e92b7d6e119e382dd7bb097b6c5aa8317d7bc5aca3a1e674edb9d06facbe6"}
Jan 28 13:11:38 crc kubenswrapper[4848]: I0128 13:11:38.903170 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 28 13:11:39 crc kubenswrapper[4848]: I0128 13:11:39.874439 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 28 13:11:39 crc kubenswrapper[4848]: I0128 13:11:39.874488 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 28 13:11:40 crc kubenswrapper[4848]: I0128 13:11:40.893553 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6c953264-454e-4949-906c-25378e467ab4" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.224:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 13:11:40 crc kubenswrapper[4848]: I0128 13:11:40.893613 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6c953264-454e-4949-906c-25378e467ab4" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.224:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 13:11:40 crc kubenswrapper[4848]: I0128 13:11:40.999398 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 13:11:40 crc kubenswrapper[4848]: I0128 13:11:40.999458 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 13:11:42 crc kubenswrapper[4848]: I0128 13:11:42.011527 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="68a7a2b4-9e0a-410e-b131-6bf39b7ffa35" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.225:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 13:11:42 crc kubenswrapper[4848]: I0128 13:11:42.011816 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="68a7a2b4-9e0a-410e-b131-6bf39b7ffa35" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.225:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 13:11:43 crc kubenswrapper[4848]: I0128 13:11:43.903291 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 28 13:11:43 crc kubenswrapper[4848]: I0128 13:11:43.950529 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 28 13:11:43 crc kubenswrapper[4848]: I0128 13:11:43.972391 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=10.972369103 podStartE2EDuration="10.972369103s" podCreationTimestamp="2026-01-28 13:11:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:11:35.559089093 +0000 UTC m=+1522.471306131" watchObservedRunningTime="2026-01-28 13:11:43.972369103 +0000 UTC m=+1530.884586161"
Jan 28 13:11:44 crc kubenswrapper[4848]: I0128 13:11:44.669292 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 28 13:11:48 crc kubenswrapper[4848]: I0128 13:11:48.664271 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 28 13:11:49 crc kubenswrapper[4848]: I0128 13:11:49.883289 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 28 13:11:49 crc kubenswrapper[4848]: I0128 13:11:49.884237 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 28 13:11:49 crc kubenswrapper[4848]: I0128 13:11:49.888922 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 28 13:11:50 crc kubenswrapper[4848]: I0128 13:11:50.741622 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 28 13:11:51 crc kubenswrapper[4848]: I0128 13:11:51.010989 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 28 13:11:51 crc kubenswrapper[4848]: I0128 13:11:51.013290 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 28 13:11:51 crc kubenswrapper[4848]: I0128 13:11:51.016997 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 28 13:11:51 crc kubenswrapper[4848]: I0128 13:11:51.024309 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 28 13:11:51 crc kubenswrapper[4848]: I0128 13:11:51.745309 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 28 13:11:51 crc kubenswrapper[4848]: I0128 13:11:51.760285 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 28 13:12:00 crc kubenswrapper[4848]: I0128 13:12:00.005132 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 28 13:12:01 crc kubenswrapper[4848]: I0128 13:12:01.255832 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 13:12:03 crc kubenswrapper[4848]: I0128 13:12:03.956869 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="rabbitmq" containerID="cri-o://d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4" gracePeriod=604797
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.138924 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="rabbitmq" containerID="cri-o://ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff" gracePeriod=604797
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.570974 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686473 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-server-conf\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") "
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686537 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9069c6ac-fe99-41c7-8ee1-0154d87e506c-erlang-cookie-secret\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") "
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686622 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-plugins-conf\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") "
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686721 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6s4x\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-kube-api-access-q6s4x\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") "
Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686791 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-confd\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") "
\"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-tls\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686914 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.686955 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9069c6ac-fe99-41c7-8ee1-0154d87e506c-pod-info\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.687013 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-plugins\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.687065 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-erlang-cookie\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.687140 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-config-data\") pod \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\" (UID: \"9069c6ac-fe99-41c7-8ee1-0154d87e506c\") " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.687694 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.687976 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.688549 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.700915 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.705573 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9069c6ac-fe99-41c7-8ee1-0154d87e506c-pod-info" (OuterVolumeSpecName: "pod-info") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.715221 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-kube-api-access-q6s4x" (OuterVolumeSpecName: "kube-api-access-q6s4x") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "kube-api-access-q6s4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.718549 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9069c6ac-fe99-41c7-8ee1-0154d87e506c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.728018 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.775195 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-config-data" (OuterVolumeSpecName: "config-data") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789826 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-server-conf" (OuterVolumeSpecName: "server-conf") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789869 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789923 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789951 4848 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9069c6ac-fe99-41c7-8ee1-0154d87e506c-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789961 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789972 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.789983 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.790008 4848 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9069c6ac-fe99-41c7-8ee1-0154d87e506c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.790017 4848 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.790027 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6s4x\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-kube-api-access-q6s4x\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.825024 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.877730 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9069c6ac-fe99-41c7-8ee1-0154d87e506c" (UID: "9069c6ac-fe99-41c7-8ee1-0154d87e506c"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.892910 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.892955 4848 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9069c6ac-fe99-41c7-8ee1-0154d87e506c-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.892965 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9069c6ac-fe99-41c7-8ee1-0154d87e506c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.949979 4848 generic.go:334] "Generic (PLEG): container finished" podID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerID="d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4" exitCode=0 Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.950036 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.950059 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9069c6ac-fe99-41c7-8ee1-0154d87e506c","Type":"ContainerDied","Data":"d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4"} Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.950529 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9069c6ac-fe99-41c7-8ee1-0154d87e506c","Type":"ContainerDied","Data":"fd375ffabd8d2cb2d2c08099cd4b9bf955dceba32ca9edee03b2cd15153a5029"} Jan 28 13:12:05 crc kubenswrapper[4848]: I0128 13:12:05.950550 4848 scope.go:117] "RemoveContainer" containerID="d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.001119 4848 scope.go:117] "RemoveContainer" containerID="a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.004993 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.037843 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.107:5671: connect: connection refused" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.046001 4848 scope.go:117] "RemoveContainer" containerID="d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.049152 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:12:06 crc kubenswrapper[4848]: E0128 13:12:06.054417 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4\": container with ID starting with d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4 not found: ID does not exist" containerID="d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 
13:12:06.054479 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4"} err="failed to get container status \"d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4\": rpc error: code = NotFound desc = could not find container \"d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4\": container with ID starting with d7a71c2dafd2e0dd260ffb77ee2dbe2fda7ca06827955e0b5ec40797455333f4 not found: ID does not exist" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.054509 4848 scope.go:117] "RemoveContainer" containerID="a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24" Jan 28 13:12:06 crc kubenswrapper[4848]: E0128 13:12:06.055232 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24\": container with ID starting with a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24 not found: ID does not exist" containerID="a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.055277 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24"} err="failed to get container status \"a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24\": rpc error: code = NotFound desc = could not find container \"a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24\": container with ID starting with a714038507f2372b5c5a460b12ae3b9101c42c53b9cb0c4d56fa1a0c00510f24 not found: ID does not exist" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.079300 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:12:06 crc kubenswrapper[4848]: E0128 13:12:06.079890 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="rabbitmq" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.079904 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="rabbitmq" Jan 28 13:12:06 crc kubenswrapper[4848]: E0128 13:12:06.079941 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="setup-container" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.079948 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="setup-container" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.080167 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="rabbitmq" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.081552 4848 util.go:30] "No sandbox for pod can be found. 
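While rabbitmq-server-0 is being torn down, its cell1 sibling already fails readiness with a plain transport-level "connection refused" on AMQPS port 5671: the broker has stopped accepting connections during its drain. Whatever the configured probe type, the logged failure reduces to a TCP dial with a deadline, which is easy to reproduce (address taken from the log; the two-second timeout is an arbitrary choice for illustration):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "10.217.0.107:5671", 2*time.Second)
	if err != nil {
		// A stopped listener yields "connect: connection refused", as logged.
		fmt.Println("readiness probe failed:", err)
		return
	}
	conn.Close()
	fmt.Println("readiness probe passed")
}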
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.085635 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.086215 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zqzld" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.086432 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.086508 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.086841 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.093562 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.094903 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.098392 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.100738 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36728af2-3caa-4d67-bec1-ed4b2d26547c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.100921 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.103597 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36728af2-3caa-4d67-bec1-ed4b2d26547c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.103741 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.103920 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.104002 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.104035 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m279f\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-kube-api-access-m279f\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.104414 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.104725 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.105374 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.105431 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-config-data\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.209708 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-config-data\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.209802 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36728af2-3caa-4d67-bec1-ed4b2d26547c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210145 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210262 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36728af2-3caa-4d67-bec1-ed4b2d26547c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" 
Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210331 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210412 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210455 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m279f\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-kube-api-access-m279f\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210477 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210609 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210687 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.210709 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.211149 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-config-data\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.211471 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.211506 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod 
\"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.211550 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.212025 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.212589 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/36728af2-3caa-4d67-bec1-ed4b2d26547c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.219637 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36728af2-3caa-4d67-bec1-ed4b2d26547c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.227987 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.230754 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36728af2-3caa-4d67-bec1-ed4b2d26547c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.230978 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.242943 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m279f\" (UniqueName: \"kubernetes.io/projected/36728af2-3caa-4d67-bec1-ed4b2d26547c-kube-api-access-m279f\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.272218 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"36728af2-3caa-4d67-bec1-ed4b2d26547c\") " pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.406431 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.759907 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824110 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-server-conf\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824188 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cmpq\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-kube-api-access-8cmpq\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824218 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824380 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-config-data\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824500 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6be2776-ada1-4c48-9588-9e488283ee6e-erlang-cookie-secret\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824556 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6be2776-ada1-4c48-9588-9e488283ee6e-pod-info\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824744 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-confd\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824789 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-plugins\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824829 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-plugins-conf\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824885 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-erlang-cookie\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.824919 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-tls\") pod \"b6be2776-ada1-4c48-9588-9e488283ee6e\" (UID: \"b6be2776-ada1-4c48-9588-9e488283ee6e\") " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.843337 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.843843 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.844270 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.845445 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.846775 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-kube-api-access-8cmpq" (OuterVolumeSpecName: "kube-api-access-8cmpq") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "kube-api-access-8cmpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.873686 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "local-storage04-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.876352 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6be2776-ada1-4c48-9588-9e488283ee6e-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.877480 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b6be2776-ada1-4c48-9588-9e488283ee6e-pod-info" (OuterVolumeSpecName: "pod-info") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.882290 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" path="/var/lib/kubelet/pods/9069c6ac-fe99-41c7-8ee1-0154d87e506c/volumes" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.888868 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.915772 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-config-data" (OuterVolumeSpecName: "config-data") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.946863 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cmpq\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-kube-api-access-8cmpq\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947015 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947034 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947048 4848 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6be2776-ada1-4c48-9588-9e488283ee6e-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947061 4848 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6be2776-ada1-4c48-9588-9e488283ee6e-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947071 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947082 4848 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947095 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:06 crc kubenswrapper[4848]: I0128 13:12:06.947103 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.000487 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36728af2-3caa-4d67-bec1-ed4b2d26547c","Type":"ContainerStarted","Data":"c2c66f410abd5a4ab5f4101f81cb91c7cd28cb4132695fbd09f203bddf6b0382"} Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.013159 4848 generic.go:334] "Generic (PLEG): container finished" podID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerID="ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff" exitCode=0 Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.013240 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6be2776-ada1-4c48-9588-9e488283ee6e","Type":"ContainerDied","Data":"ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff"} Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.013319 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.013358 4848 scope.go:117] "RemoveContainer" containerID="ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.013334 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6be2776-ada1-4c48-9588-9e488283ee6e","Type":"ContainerDied","Data":"edbaa59f957bf2e0bba233789d74da933f88cb587e9303c2e2bc9bc9b393f01a"} Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.043575 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-server-conf" (OuterVolumeSpecName: "server-conf") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.046889 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.054070 4848 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6be2776-ada1-4c48-9588-9e488283ee6e-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.054101 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.069580 4848 scope.go:117] "RemoveContainer" containerID="5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.096728 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b6be2776-ada1-4c48-9588-9e488283ee6e" (UID: "b6be2776-ada1-4c48-9588-9e488283ee6e"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.099652 4848 scope.go:117] "RemoveContainer" containerID="ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff" Jan 28 13:12:07 crc kubenswrapper[4848]: E0128 13:12:07.100373 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff\": container with ID starting with ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff not found: ID does not exist" containerID="ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.100413 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff"} err="failed to get container status \"ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff\": rpc error: code = NotFound desc = could not find container \"ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff\": container with ID starting with ef51a88173bc79d42decf8a73cfa04c65b89a572426ddfa2b42b75987adda2ff not found: ID does not exist" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.100440 4848 scope.go:117] "RemoveContainer" containerID="5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e" Jan 28 13:12:07 crc kubenswrapper[4848]: E0128 13:12:07.100836 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e\": container with ID starting with 5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e not found: ID does not exist" containerID="5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.100870 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e"} err="failed to get container 
status \"5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e\": rpc error: code = NotFound desc = could not find container \"5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e\": container with ID starting with 5093e2826031e6f1744f0acae7e4b211a54ff1a80fdff714df96e9dda60b1b4e not found: ID does not exist" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.156043 4848 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6be2776-ada1-4c48-9588-9e488283ee6e-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.362775 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.374585 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.391510 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:12:07 crc kubenswrapper[4848]: E0128 13:12:07.392730 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="setup-container" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.392756 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="setup-container" Jan 28 13:12:07 crc kubenswrapper[4848]: E0128 13:12:07.392771 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="rabbitmq" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.392779 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="rabbitmq" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.393068 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" containerName="rabbitmq" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.395792 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.398515 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-lts5k" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.398516 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.398618 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.398833 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.400464 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.405632 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.408558 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.434565 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570230 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570374 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2255ce73-5019-4b86-b15b-1e390099af55-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570428 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570485 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570506 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570530 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570626 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2255ce73-5019-4b86-b15b-1e390099af55-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.570717 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlvxm\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-kube-api-access-nlvxm\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.571010 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.571047 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.571076 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.672989 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673072 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673102 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673135 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673171 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2255ce73-5019-4b86-b15b-1e390099af55-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673284 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlvxm\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-kube-api-access-nlvxm\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673330 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673372 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673402 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673478 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673544 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2255ce73-5019-4b86-b15b-1e390099af55-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.673830 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.674040 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.674435 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.674517 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.675122 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.675129 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2255ce73-5019-4b86-b15b-1e390099af55-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.680858 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.680901 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.681285 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2255ce73-5019-4b86-b15b-1e390099af55-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.684933 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2255ce73-5019-4b86-b15b-1e390099af55-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.697075 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlvxm\" (UniqueName: \"kubernetes.io/projected/2255ce73-5019-4b86-b15b-1e390099af55-kube-api-access-nlvxm\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.712616 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2255ce73-5019-4b86-b15b-1e390099af55\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:07 crc kubenswrapper[4848]: I0128 13:12:07.717771 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:08 crc kubenswrapper[4848]: I0128 13:12:08.394011 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 13:12:08 crc kubenswrapper[4848]: W0128 13:12:08.404561 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2255ce73_5019_4b86_b15b_1e390099af55.slice/crio-e8f2006a6b44211afd5df89ecae7f7d51630d04e32a8be02366eab2d4543751b WatchSource:0}: Error finding container e8f2006a6b44211afd5df89ecae7f7d51630d04e32a8be02366eab2d4543751b: Status 404 returned error can't find the container with id e8f2006a6b44211afd5df89ecae7f7d51630d04e32a8be02366eab2d4543751b Jan 28 13:12:08 crc kubenswrapper[4848]: I0128 13:12:08.865276 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6be2776-ada1-4c48-9588-9e488283ee6e" path="/var/lib/kubelet/pods/b6be2776-ada1-4c48-9588-9e488283ee6e/volumes" Jan 28 13:12:09 crc kubenswrapper[4848]: I0128 13:12:09.053033 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2255ce73-5019-4b86-b15b-1e390099af55","Type":"ContainerStarted","Data":"e8f2006a6b44211afd5df89ecae7f7d51630d04e32a8be02366eab2d4543751b"} Jan 28 13:12:09 crc kubenswrapper[4848]: I0128 13:12:09.055232 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36728af2-3caa-4d67-bec1-ed4b2d26547c","Type":"ContainerStarted","Data":"81cc0db6ae224ef7b0cc27333e00e43f7914f31da586cee7595fe1035b9c2013"} Jan 28 13:12:10 crc kubenswrapper[4848]: I0128 13:12:10.353548 4848 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9069c6ac-fe99-41c7-8ee1-0154d87e506c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: i/o timeout" Jan 28 13:12:11 crc kubenswrapper[4848]: I0128 13:12:11.082688 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2255ce73-5019-4b86-b15b-1e390099af55","Type":"ContainerStarted","Data":"b15ebd6e642973ca1e1b6a8cc89c8527d9fa870297d0659c5968d73656c22d56"} Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.271732 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-566ff98dd5-pzvmf"] Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.274897 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.284052 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.284974 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566ff98dd5-pzvmf"] Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.438398 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-swift-storage-0\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.438746 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gqg2\" (UniqueName: \"kubernetes.io/projected/c47cbe99-d93b-4e1f-be2f-94694bc1996b-kube-api-access-7gqg2\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.439056 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-sb\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.439190 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-nb\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.439373 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-svc\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.439605 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-config\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.439786 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-openstack-edpm-ipam\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.486070 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566ff98dd5-pzvmf"] Jan 28 13:12:14 crc kubenswrapper[4848]: E0128 13:12:14.487417 4848 pod_workers.go:1301] "Error syncing pod, skipping" 
err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-7gqg2 openstack-edpm-ipam ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" podUID="c47cbe99-d93b-4e1f-be2f-94694bc1996b" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.512449 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55b94cdbb7-56ttn"] Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.514980 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.530086 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55b94cdbb7-56ttn"] Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.542321 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-swift-storage-0\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.542387 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gqg2\" (UniqueName: \"kubernetes.io/projected/c47cbe99-d93b-4e1f-be2f-94694bc1996b-kube-api-access-7gqg2\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.542460 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-sb\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.542502 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-nb\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.543651 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-sb\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.543645 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-swift-storage-0\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.543719 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-nb\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 
crc kubenswrapper[4848]: I0128 13:12:14.543728 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-svc\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.543835 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-config\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.543985 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-openstack-edpm-ipam\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.544494 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-svc\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.545099 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-openstack-edpm-ipam\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.545339 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-config\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.579394 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gqg2\" (UniqueName: \"kubernetes.io/projected/c47cbe99-d93b-4e1f-be2f-94694bc1996b-kube-api-access-7gqg2\") pod \"dnsmasq-dns-566ff98dd5-pzvmf\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.646153 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-dns-swift-storage-0\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.646787 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-openstack-edpm-ipam\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.646838 4848 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-config\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.647084 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-ovsdbserver-sb\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.647117 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-ovsdbserver-nb\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.647166 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns6lw\" (UniqueName: \"kubernetes.io/projected/91425abd-325a-48c8-9c49-34b409614808-kube-api-access-ns6lw\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.647243 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-dns-svc\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.749905 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-openstack-edpm-ipam\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.749969 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-config\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.750017 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-ovsdbserver-sb\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.750041 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-ovsdbserver-nb\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 
13:12:14.750073 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns6lw\" (UniqueName: \"kubernetes.io/projected/91425abd-325a-48c8-9c49-34b409614808-kube-api-access-ns6lw\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.750122 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-dns-svc\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.750171 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-dns-swift-storage-0\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.751444 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-dns-swift-storage-0\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.751770 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-ovsdbserver-sb\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.751885 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-ovsdbserver-nb\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.751988 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-openstack-edpm-ipam\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.752029 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-config\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.752013 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91425abd-325a-48c8-9c49-34b409614808-dns-svc\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.770629 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ns6lw\" (UniqueName: \"kubernetes.io/projected/91425abd-325a-48c8-9c49-34b409614808-kube-api-access-ns6lw\") pod \"dnsmasq-dns-55b94cdbb7-56ttn\" (UID: \"91425abd-325a-48c8-9c49-34b409614808\") " pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:14 crc kubenswrapper[4848]: I0128 13:12:14.835622 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.128879 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.143852 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264136 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gqg2\" (UniqueName: \"kubernetes.io/projected/c47cbe99-d93b-4e1f-be2f-94694bc1996b-kube-api-access-7gqg2\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264233 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-openstack-edpm-ipam\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264326 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-swift-storage-0\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264359 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-sb\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264395 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-nb\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264466 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-config\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.264532 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-svc\") pod \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\" (UID: \"c47cbe99-d93b-4e1f-be2f-94694bc1996b\") " Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.265244 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-config" (OuterVolumeSpecName: "config") pod 
"c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.265231 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.265299 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.265619 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.265682 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.265712 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.273303 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c47cbe99-d93b-4e1f-be2f-94694bc1996b-kube-api-access-7gqg2" (OuterVolumeSpecName: "kube-api-access-7gqg2") pod "c47cbe99-d93b-4e1f-be2f-94694bc1996b" (UID: "c47cbe99-d93b-4e1f-be2f-94694bc1996b"). InnerVolumeSpecName "kube-api-access-7gqg2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.339224 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55b94cdbb7-56ttn"] Jan 28 13:12:15 crc kubenswrapper[4848]: W0128 13:12:15.347161 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91425abd_325a_48c8_9c49_34b409614808.slice/crio-a22b76616930482372eed3b54bf8eb197d9c7bb27fe6a8fc34048e0b7eec94c2 WatchSource:0}: Error finding container a22b76616930482372eed3b54bf8eb197d9c7bb27fe6a8fc34048e0b7eec94c2: Status 404 returned error can't find the container with id a22b76616930482372eed3b54bf8eb197d9c7bb27fe6a8fc34048e0b7eec94c2 Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.370374 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gqg2\" (UniqueName: \"kubernetes.io/projected/c47cbe99-d93b-4e1f-be2f-94694bc1996b-kube-api-access-7gqg2\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.370753 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.370840 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.370969 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.371059 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.372276 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:15 crc kubenswrapper[4848]: I0128 13:12:15.372379 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c47cbe99-d93b-4e1f-be2f-94694bc1996b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.148328 4848 generic.go:334] "Generic (PLEG): container finished" podID="91425abd-325a-48c8-9c49-34b409614808" containerID="10909b1dc5451cdcda1bea6fc257dd12a8f0d4d1e2a328b03802e6846da2e9db" exitCode=0 Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.148946 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566ff98dd5-pzvmf" Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.149222 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" event={"ID":"91425abd-325a-48c8-9c49-34b409614808","Type":"ContainerDied","Data":"10909b1dc5451cdcda1bea6fc257dd12a8f0d4d1e2a328b03802e6846da2e9db"} Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.149289 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" event={"ID":"91425abd-325a-48c8-9c49-34b409614808","Type":"ContainerStarted","Data":"a22b76616930482372eed3b54bf8eb197d9c7bb27fe6a8fc34048e0b7eec94c2"} Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.240197 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566ff98dd5-pzvmf"] Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.271107 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-566ff98dd5-pzvmf"] Jan 28 13:12:16 crc kubenswrapper[4848]: I0128 13:12:16.867121 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c47cbe99-d93b-4e1f-be2f-94694bc1996b" path="/var/lib/kubelet/pods/c47cbe99-d93b-4e1f-be2f-94694bc1996b/volumes" Jan 28 13:12:17 crc kubenswrapper[4848]: I0128 13:12:17.166370 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" event={"ID":"91425abd-325a-48c8-9c49-34b409614808","Type":"ContainerStarted","Data":"02f4c9fc9fbe2ca84a2070e61e32ad9ac00b163956589f015ec81398cc771b3f"} Jan 28 13:12:17 crc kubenswrapper[4848]: I0128 13:12:17.166589 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:17 crc kubenswrapper[4848]: I0128 13:12:17.192026 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" podStartSLOduration=3.192004049 podStartE2EDuration="3.192004049s" podCreationTimestamp="2026-01-28 13:12:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:12:17.186708753 +0000 UTC m=+1564.098925791" watchObservedRunningTime="2026-01-28 13:12:17.192004049 +0000 UTC m=+1564.104221087" Jan 28 13:12:24 crc kubenswrapper[4848]: I0128 13:12:24.837418 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55b94cdbb7-56ttn" Jan 28 13:12:24 crc kubenswrapper[4848]: I0128 13:12:24.958879 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bb56bffb5-dphbx"] Jan 28 13:12:24 crc kubenswrapper[4848]: I0128 13:12:24.959206 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerName="dnsmasq-dns" containerID="cri-o://8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745" gracePeriod=10 Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.818302 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.864290 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-swift-storage-0\") pod \"5e75b70e-a61e-411a-b2ec-40478a36ba48\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.865603 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgwmk\" (UniqueName: \"kubernetes.io/projected/5e75b70e-a61e-411a-b2ec-40478a36ba48-kube-api-access-cgwmk\") pod \"5e75b70e-a61e-411a-b2ec-40478a36ba48\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.865743 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-nb\") pod \"5e75b70e-a61e-411a-b2ec-40478a36ba48\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.865791 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-config\") pod \"5e75b70e-a61e-411a-b2ec-40478a36ba48\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.865860 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-sb\") pod \"5e75b70e-a61e-411a-b2ec-40478a36ba48\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.865910 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-svc\") pod \"5e75b70e-a61e-411a-b2ec-40478a36ba48\" (UID: \"5e75b70e-a61e-411a-b2ec-40478a36ba48\") " Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.892996 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e75b70e-a61e-411a-b2ec-40478a36ba48-kube-api-access-cgwmk" (OuterVolumeSpecName: "kube-api-access-cgwmk") pod "5e75b70e-a61e-411a-b2ec-40478a36ba48" (UID: "5e75b70e-a61e-411a-b2ec-40478a36ba48"). InnerVolumeSpecName "kube-api-access-cgwmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.950069 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5e75b70e-a61e-411a-b2ec-40478a36ba48" (UID: "5e75b70e-a61e-411a-b2ec-40478a36ba48"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.950833 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-config" (OuterVolumeSpecName: "config") pod "5e75b70e-a61e-411a-b2ec-40478a36ba48" (UID: "5e75b70e-a61e-411a-b2ec-40478a36ba48"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.962172 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5e75b70e-a61e-411a-b2ec-40478a36ba48" (UID: "5e75b70e-a61e-411a-b2ec-40478a36ba48"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.969086 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgwmk\" (UniqueName: \"kubernetes.io/projected/5e75b70e-a61e-411a-b2ec-40478a36ba48-kube-api-access-cgwmk\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.969126 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.969140 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.969152 4848 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.973634 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5e75b70e-a61e-411a-b2ec-40478a36ba48" (UID: "5e75b70e-a61e-411a-b2ec-40478a36ba48"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:25 crc kubenswrapper[4848]: I0128 13:12:25.980094 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5e75b70e-a61e-411a-b2ec-40478a36ba48" (UID: "5e75b70e-a61e-411a-b2ec-40478a36ba48"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.071646 4848 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.071707 4848 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e75b70e-a61e-411a-b2ec-40478a36ba48-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.278926 4848 generic.go:334] "Generic (PLEG): container finished" podID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerID="8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745" exitCode=0 Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.278986 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" event={"ID":"5e75b70e-a61e-411a-b2ec-40478a36ba48","Type":"ContainerDied","Data":"8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745"} Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.279026 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" event={"ID":"5e75b70e-a61e-411a-b2ec-40478a36ba48","Type":"ContainerDied","Data":"2b9756d5d5fdeef1f19138b6cdb80565ea1b7488f5cd0d23cd0addbb41133524"} Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.279048 4848 scope.go:117] "RemoveContainer" containerID="8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.279388 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb56bffb5-dphbx" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.306236 4848 scope.go:117] "RemoveContainer" containerID="03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.331495 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bb56bffb5-dphbx"] Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.349629 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bb56bffb5-dphbx"] Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.350086 4848 scope.go:117] "RemoveContainer" containerID="8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745" Jan 28 13:12:26 crc kubenswrapper[4848]: E0128 13:12:26.350637 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745\": container with ID starting with 8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745 not found: ID does not exist" containerID="8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.350738 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745"} err="failed to get container status \"8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745\": rpc error: code = NotFound desc = could not find container \"8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745\": container with ID starting with 8fbde88c7b6bf0e90185e9d690bf8f9d21c70e26306d236c2a508baf85e50745 not 
found: ID does not exist" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.350816 4848 scope.go:117] "RemoveContainer" containerID="03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902" Jan 28 13:12:26 crc kubenswrapper[4848]: E0128 13:12:26.351622 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902\": container with ID starting with 03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902 not found: ID does not exist" containerID="03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.351652 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902"} err="failed to get container status \"03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902\": rpc error: code = NotFound desc = could not find container \"03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902\": container with ID starting with 03b983eaa4f2f298251efc7eeb43e244de1d984170f1f43779ffb9b395118902 not found: ID does not exist" Jan 28 13:12:26 crc kubenswrapper[4848]: I0128 13:12:26.862224 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" path="/var/lib/kubelet/pods/5e75b70e-a61e-411a-b2ec-40478a36ba48/volumes" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.937458 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9"] Jan 28 13:12:38 crc kubenswrapper[4848]: E0128 13:12:38.940228 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerName="dnsmasq-dns" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.940347 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerName="dnsmasq-dns" Jan 28 13:12:38 crc kubenswrapper[4848]: E0128 13:12:38.940430 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerName="init" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.940502 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerName="init" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.940835 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e75b70e-a61e-411a-b2ec-40478a36ba48" containerName="dnsmasq-dns" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.942348 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.945207 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.947650 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.948310 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.948354 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.979748 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9"] Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.984954 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.994562 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k5d7\" (UniqueName: \"kubernetes.io/projected/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-kube-api-access-2k5d7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:38 crc kubenswrapper[4848]: I0128 13:12:38.994849 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.000527 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.102588 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.102663 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.102731 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k5d7\" (UniqueName: \"kubernetes.io/projected/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-kube-api-access-2k5d7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.102790 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.110557 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.112906 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.118274 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.122695 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k5d7\" (UniqueName: \"kubernetes.io/projected/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-kube-api-access-2k5d7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.274330 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:12:39 crc kubenswrapper[4848]: I0128 13:12:39.876715 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9"] Jan 28 13:12:40 crc kubenswrapper[4848]: I0128 13:12:40.481862 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" event={"ID":"0c2e6d21-25c3-4653-bd87-18f42e3a68a5","Type":"ContainerStarted","Data":"0df11a879d0ccf10e0a0f400619b9788e7c564d986093f8fd11f46829e78eb81"} Jan 28 13:12:41 crc kubenswrapper[4848]: I0128 13:12:41.497673 4848 generic.go:334] "Generic (PLEG): container finished" podID="36728af2-3caa-4d67-bec1-ed4b2d26547c" containerID="81cc0db6ae224ef7b0cc27333e00e43f7914f31da586cee7595fe1035b9c2013" exitCode=0 Jan 28 13:12:41 crc kubenswrapper[4848]: I0128 13:12:41.497900 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36728af2-3caa-4d67-bec1-ed4b2d26547c","Type":"ContainerDied","Data":"81cc0db6ae224ef7b0cc27333e00e43f7914f31da586cee7595fe1035b9c2013"} Jan 28 13:12:43 crc kubenswrapper[4848]: I0128 13:12:43.522997 4848 generic.go:334] "Generic (PLEG): container finished" podID="2255ce73-5019-4b86-b15b-1e390099af55" containerID="b15ebd6e642973ca1e1b6a8cc89c8527d9fa870297d0659c5968d73656c22d56" exitCode=0 Jan 28 13:12:43 crc kubenswrapper[4848]: I0128 13:12:43.523042 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2255ce73-5019-4b86-b15b-1e390099af55","Type":"ContainerDied","Data":"b15ebd6e642973ca1e1b6a8cc89c8527d9fa870297d0659c5968d73656c22d56"} Jan 28 13:12:43 crc kubenswrapper[4848]: I0128 13:12:43.530220 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36728af2-3caa-4d67-bec1-ed4b2d26547c","Type":"ContainerStarted","Data":"e992fa8b28a27d62c8f464b1c9a2a56bab7ac72529d9ac3ab83764c5e34a7ea5"} Jan 28 13:12:43 crc kubenswrapper[4848]: I0128 13:12:43.530438 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 28 13:12:43 crc kubenswrapper[4848]: I0128 13:12:43.588292 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.588271043 podStartE2EDuration="37.588271043s" podCreationTimestamp="2026-01-28 13:12:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:12:43.578763101 +0000 UTC m=+1590.490980139" watchObservedRunningTime="2026-01-28 13:12:43.588271043 +0000 UTC m=+1590.500488081" Jan 28 13:12:51 crc kubenswrapper[4848]: I0128 13:12:51.633866 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" event={"ID":"0c2e6d21-25c3-4653-bd87-18f42e3a68a5","Type":"ContainerStarted","Data":"9eafc5618074fda62ed5841592fee02e90be7febbacfb209407a546ef499c63c"} Jan 28 13:12:51 crc kubenswrapper[4848]: I0128 13:12:51.636219 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2255ce73-5019-4b86-b15b-1e390099af55","Type":"ContainerStarted","Data":"ff5d36e60730e958ba0a616edf563a815c233a71053585f2d211b40cc6cfe693"} Jan 28 13:12:51 crc kubenswrapper[4848]: I0128 13:12:51.636485 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:12:51 crc kubenswrapper[4848]: I0128 13:12:51.662450 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" podStartSLOduration=2.8961846060000003 podStartE2EDuration="13.662430157s" podCreationTimestamp="2026-01-28 13:12:38 +0000 UTC" firstStartedPulling="2026-01-28 13:12:39.882623894 +0000 UTC m=+1586.794840942" lastFinishedPulling="2026-01-28 13:12:50.648869455 +0000 UTC m=+1597.561086493" observedRunningTime="2026-01-28 13:12:51.658599531 +0000 UTC m=+1598.570816579" watchObservedRunningTime="2026-01-28 13:12:51.662430157 +0000 UTC m=+1598.574647195" Jan 28 13:12:51 crc kubenswrapper[4848]: I0128 13:12:51.693683 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=44.693655046 podStartE2EDuration="44.693655046s" podCreationTimestamp="2026-01-28 13:12:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:12:51.688293778 +0000 UTC m=+1598.600510816" watchObservedRunningTime="2026-01-28 13:12:51.693655046 +0000 UTC m=+1598.605872084" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.520786 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tdsql"] Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.524181 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.552213 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdsql"] Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.580870 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-utilities\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.581037 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9cmn\" (UniqueName: \"kubernetes.io/projected/cb7d4944-168d-4dbf-a725-66f110c1566b-kube-api-access-v9cmn\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.581070 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-catalog-content\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.684601 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-utilities\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.684661 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-v9cmn\" (UniqueName: \"kubernetes.io/projected/cb7d4944-168d-4dbf-a725-66f110c1566b-kube-api-access-v9cmn\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.684699 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-catalog-content\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.685163 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-utilities\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.685798 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-catalog-content\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.710889 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9cmn\" (UniqueName: \"kubernetes.io/projected/cb7d4944-168d-4dbf-a725-66f110c1566b-kube-api-access-v9cmn\") pod \"community-operators-tdsql\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:54 crc kubenswrapper[4848]: I0128 13:12:54.853376 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:12:56 crc kubenswrapper[4848]: I0128 13:12:56.308287 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdsql"] Jan 28 13:12:56 crc kubenswrapper[4848]: I0128 13:12:56.414474 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 28 13:12:56 crc kubenswrapper[4848]: I0128 13:12:56.689330 4848 generic.go:334] "Generic (PLEG): container finished" podID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerID="55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c" exitCode=0 Jan 28 13:12:56 crc kubenswrapper[4848]: I0128 13:12:56.689403 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerDied","Data":"55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c"} Jan 28 13:12:56 crc kubenswrapper[4848]: I0128 13:12:56.689447 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerStarted","Data":"a9b94e84e2672a9c8239a914b2ec65d3bab03d4ef7766d444f5938ec20c137e4"} Jan 28 13:12:58 crc kubenswrapper[4848]: I0128 13:12:58.714535 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerStarted","Data":"dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277"} Jan 28 13:12:59 crc kubenswrapper[4848]: I0128 13:12:59.727085 4848 generic.go:334] "Generic (PLEG): container finished" podID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerID="dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277" exitCode=0 Jan 28 13:12:59 crc kubenswrapper[4848]: I0128 13:12:59.727191 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerDied","Data":"dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277"} Jan 28 13:13:00 crc kubenswrapper[4848]: I0128 13:13:00.740762 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerStarted","Data":"67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d"} Jan 28 13:13:00 crc kubenswrapper[4848]: I0128 13:13:00.769329 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tdsql" podStartSLOduration=3.323846353 podStartE2EDuration="6.7693075s" podCreationTimestamp="2026-01-28 13:12:54 +0000 UTC" firstStartedPulling="2026-01-28 13:12:56.691266869 +0000 UTC m=+1603.603483897" lastFinishedPulling="2026-01-28 13:13:00.136727986 +0000 UTC m=+1607.048945044" observedRunningTime="2026-01-28 13:13:00.763101208 +0000 UTC m=+1607.675318266" watchObservedRunningTime="2026-01-28 13:13:00.7693075 +0000 UTC m=+1607.681524538" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.265747 4848 scope.go:117] "RemoveContainer" containerID="bd4995e919c1127dc4db389c37f912ca1e4ff74e8814effa22e3f5c8571e28f6" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.289307 4848 scope.go:117] "RemoveContainer" containerID="437d506ab6d6d84fb0f608a850f732026426545988daf1c868b4ab308e2d895f" Jan 28 13:13:02 
crc kubenswrapper[4848]: I0128 13:13:02.350365 4848 scope.go:117] "RemoveContainer" containerID="1336ef97eb2f49fc570ecd186bea56ae243a27ff5507d7981d6a6805b0007e26" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.692186 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-st4dg"] Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.695157 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.711276 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-st4dg"] Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.763622 4848 generic.go:334] "Generic (PLEG): container finished" podID="0c2e6d21-25c3-4653-bd87-18f42e3a68a5" containerID="9eafc5618074fda62ed5841592fee02e90be7febbacfb209407a546ef499c63c" exitCode=0 Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.763667 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" event={"ID":"0c2e6d21-25c3-4653-bd87-18f42e3a68a5","Type":"ContainerDied","Data":"9eafc5618074fda62ed5841592fee02e90be7febbacfb209407a546ef499c63c"} Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.771869 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-utilities\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.771997 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-catalog-content\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.772038 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms9rv\" (UniqueName: \"kubernetes.io/projected/ad18ff50-d577-4b5e-979b-1d7e86020f4d-kube-api-access-ms9rv\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.876422 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms9rv\" (UniqueName: \"kubernetes.io/projected/ad18ff50-d577-4b5e-979b-1d7e86020f4d-kube-api-access-ms9rv\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.877085 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-utilities\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.878155 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-utilities\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.878309 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-catalog-content\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.878594 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-catalog-content\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:02 crc kubenswrapper[4848]: I0128 13:13:02.898406 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms9rv\" (UniqueName: \"kubernetes.io/projected/ad18ff50-d577-4b5e-979b-1d7e86020f4d-kube-api-access-ms9rv\") pod \"certified-operators-st4dg\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:03 crc kubenswrapper[4848]: I0128 13:13:03.016180 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:03 crc kubenswrapper[4848]: I0128 13:13:03.587800 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-st4dg"] Jan 28 13:13:03 crc kubenswrapper[4848]: I0128 13:13:03.795959 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerStarted","Data":"004ffa8156dbd32893b1ba8493d1f57a03aacd2983db949b8333f39e2c033dfa"} Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.315444 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.413485 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-ssh-key-openstack-edpm-ipam\") pod \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.413592 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k5d7\" (UniqueName: \"kubernetes.io/projected/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-kube-api-access-2k5d7\") pod \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.413670 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-repo-setup-combined-ca-bundle\") pod \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.413836 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-inventory\") pod \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\" (UID: \"0c2e6d21-25c3-4653-bd87-18f42e3a68a5\") " Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.420979 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-kube-api-access-2k5d7" (OuterVolumeSpecName: "kube-api-access-2k5d7") pod "0c2e6d21-25c3-4653-bd87-18f42e3a68a5" (UID: "0c2e6d21-25c3-4653-bd87-18f42e3a68a5"). InnerVolumeSpecName "kube-api-access-2k5d7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.422639 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "0c2e6d21-25c3-4653-bd87-18f42e3a68a5" (UID: "0c2e6d21-25c3-4653-bd87-18f42e3a68a5"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.447684 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-inventory" (OuterVolumeSpecName: "inventory") pod "0c2e6d21-25c3-4653-bd87-18f42e3a68a5" (UID: "0c2e6d21-25c3-4653-bd87-18f42e3a68a5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.449322 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0c2e6d21-25c3-4653-bd87-18f42e3a68a5" (UID: "0c2e6d21-25c3-4653-bd87-18f42e3a68a5"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.516376 4848 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.516423 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.516434 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.516444 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k5d7\" (UniqueName: \"kubernetes.io/projected/0c2e6d21-25c3-4653-bd87-18f42e3a68a5-kube-api-access-2k5d7\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.811692 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" event={"ID":"0c2e6d21-25c3-4653-bd87-18f42e3a68a5","Type":"ContainerDied","Data":"0df11a879d0ccf10e0a0f400619b9788e7c564d986093f8fd11f46829e78eb81"} Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.811734 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0df11a879d0ccf10e0a0f400619b9788e7c564d986093f8fd11f46829e78eb81" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.811796 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.821948 4848 generic.go:334] "Generic (PLEG): container finished" podID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerID="d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54" exitCode=0 Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.822016 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerDied","Data":"d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54"} Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.896187 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.896240 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.898436 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz"] Jan 28 13:13:04 crc kubenswrapper[4848]: E0128 13:13:04.899006 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c2e6d21-25c3-4653-bd87-18f42e3a68a5" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.899106 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c2e6d21-25c3-4653-bd87-18f42e3a68a5" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.899853 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c2e6d21-25c3-4653-bd87-18f42e3a68a5" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.901025 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.907137 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.907307 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.907229 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.910421 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz"] Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.915579 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.928430 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.928666 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmr8v\" (UniqueName: \"kubernetes.io/projected/c1cb683f-398f-4145-aa62-96ecbb02e82d-kube-api-access-cmr8v\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.928704 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:04 crc kubenswrapper[4848]: I0128 13:13:04.940318 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.031356 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmr8v\" (UniqueName: \"kubernetes.io/projected/c1cb683f-398f-4145-aa62-96ecbb02e82d-kube-api-access-cmr8v\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.031918 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.032014 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.039777 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.043349 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.050559 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmr8v\" (UniqueName: \"kubernetes.io/projected/c1cb683f-398f-4145-aa62-96ecbb02e82d-kube-api-access-cmr8v\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-w9xdz\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.222879 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.794931 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz"] Jan 28 13:13:05 crc kubenswrapper[4848]: W0128 13:13:05.802466 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1cb683f_398f_4145_aa62_96ecbb02e82d.slice/crio-ec031f5f6f5fe52ecf304ccc313dc18d36e7e707f780ec8fccc9211e560335c7 WatchSource:0}: Error finding container ec031f5f6f5fe52ecf304ccc313dc18d36e7e707f780ec8fccc9211e560335c7: Status 404 returned error can't find the container with id ec031f5f6f5fe52ecf304ccc313dc18d36e7e707f780ec8fccc9211e560335c7 Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.838772 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" event={"ID":"c1cb683f-398f-4145-aa62-96ecbb02e82d","Type":"ContainerStarted","Data":"ec031f5f6f5fe52ecf304ccc313dc18d36e7e707f780ec8fccc9211e560335c7"} Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.845480 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerStarted","Data":"57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb"} Jan 28 13:13:05 crc kubenswrapper[4848]: I0128 13:13:05.897384 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:13:06 crc kubenswrapper[4848]: I0128 13:13:06.871161 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" event={"ID":"c1cb683f-398f-4145-aa62-96ecbb02e82d","Type":"ContainerStarted","Data":"4120948bb42c22570383444e750e1da6871c29ac655b0d641f6919049d472a8d"} Jan 28 13:13:06 crc kubenswrapper[4848]: I0128 13:13:06.882337 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" podStartSLOduration=2.487682884 podStartE2EDuration="2.882314478s" podCreationTimestamp="2026-01-28 13:13:04 +0000 UTC" firstStartedPulling="2026-01-28 13:13:05.805662089 +0000 UTC m=+1612.717879127" lastFinishedPulling="2026-01-28 13:13:06.200293683 +0000 UTC m=+1613.112510721" observedRunningTime="2026-01-28 13:13:06.880321003 +0000 UTC m=+1613.792538041" watchObservedRunningTime="2026-01-28 13:13:06.882314478 +0000 UTC m=+1613.794531526" Jan 28 13:13:07 crc kubenswrapper[4848]: I0128 13:13:07.294628 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdsql"] Jan 28 13:13:07 crc kubenswrapper[4848]: I0128 13:13:07.721527 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 28 13:13:07 crc kubenswrapper[4848]: I0128 13:13:07.887032 4848 generic.go:334] "Generic (PLEG): container finished" podID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerID="57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb" exitCode=0 Jan 28 13:13:07 crc kubenswrapper[4848]: I0128 13:13:07.887148 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerDied","Data":"57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb"} Jan 28 13:13:07 crc kubenswrapper[4848]: I0128 13:13:07.891487 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tdsql" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="registry-server" containerID="cri-o://67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d" gracePeriod=2 Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.398428 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.523741 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-catalog-content\") pod \"cb7d4944-168d-4dbf-a725-66f110c1566b\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.524686 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9cmn\" (UniqueName: \"kubernetes.io/projected/cb7d4944-168d-4dbf-a725-66f110c1566b-kube-api-access-v9cmn\") pod \"cb7d4944-168d-4dbf-a725-66f110c1566b\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.524880 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-utilities\") pod \"cb7d4944-168d-4dbf-a725-66f110c1566b\" (UID: \"cb7d4944-168d-4dbf-a725-66f110c1566b\") " Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.526013 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-utilities" (OuterVolumeSpecName: "utilities") pod "cb7d4944-168d-4dbf-a725-66f110c1566b" (UID: "cb7d4944-168d-4dbf-a725-66f110c1566b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.534457 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb7d4944-168d-4dbf-a725-66f110c1566b-kube-api-access-v9cmn" (OuterVolumeSpecName: "kube-api-access-v9cmn") pod "cb7d4944-168d-4dbf-a725-66f110c1566b" (UID: "cb7d4944-168d-4dbf-a725-66f110c1566b"). InnerVolumeSpecName "kube-api-access-v9cmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.584091 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb7d4944-168d-4dbf-a725-66f110c1566b" (UID: "cb7d4944-168d-4dbf-a725-66f110c1566b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.627607 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.627637 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9cmn\" (UniqueName: \"kubernetes.io/projected/cb7d4944-168d-4dbf-a725-66f110c1566b-kube-api-access-v9cmn\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.627651 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7d4944-168d-4dbf-a725-66f110c1566b-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.898461 4848 generic.go:334] "Generic (PLEG): container finished" podID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerID="67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d" exitCode=0 Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.898569 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerDied","Data":"67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d"} Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.898604 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdsql" event={"ID":"cb7d4944-168d-4dbf-a725-66f110c1566b","Type":"ContainerDied","Data":"a9b94e84e2672a9c8239a914b2ec65d3bab03d4ef7766d444f5938ec20c137e4"} Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.898622 4848 scope.go:117] "RemoveContainer" containerID="67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.898762 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdsql" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.908064 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerStarted","Data":"cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba"} Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.920522 4848 scope.go:117] "RemoveContainer" containerID="dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.942725 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdsql"] Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.956511 4848 scope.go:117] "RemoveContainer" containerID="55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.956617 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tdsql"] Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.969577 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-st4dg" podStartSLOduration=3.351045537 podStartE2EDuration="6.969558921s" podCreationTimestamp="2026-01-28 13:13:02 +0000 UTC" firstStartedPulling="2026-01-28 13:13:04.825063635 +0000 UTC m=+1611.737280673" lastFinishedPulling="2026-01-28 13:13:08.443577019 +0000 UTC m=+1615.355794057" observedRunningTime="2026-01-28 13:13:08.943333345 +0000 UTC m=+1615.855550393" watchObservedRunningTime="2026-01-28 13:13:08.969558921 +0000 UTC m=+1615.881775959" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.976862 4848 scope.go:117] "RemoveContainer" containerID="67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d" Jan 28 13:13:08 crc kubenswrapper[4848]: E0128 13:13:08.977754 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d\": container with ID starting with 67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d not found: ID does not exist" containerID="67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.977786 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d"} err="failed to get container status \"67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d\": rpc error: code = NotFound desc = could not find container \"67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d\": container with ID starting with 67c8ed49346054701dbb12709cd0065b0e3e19464e31ef8430228be150cf5f3d not found: ID does not exist" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.977807 4848 scope.go:117] "RemoveContainer" containerID="dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277" Jan 28 13:13:08 crc kubenswrapper[4848]: E0128 13:13:08.978088 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277\": container with ID starting with dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277 not found: ID does not exist" 
containerID="dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.978113 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277"} err="failed to get container status \"dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277\": rpc error: code = NotFound desc = could not find container \"dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277\": container with ID starting with dee8637aef5743d67edaef298de741e75b3b05014e35771c36d521e8f5220277 not found: ID does not exist" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.978126 4848 scope.go:117] "RemoveContainer" containerID="55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c" Jan 28 13:13:08 crc kubenswrapper[4848]: E0128 13:13:08.978369 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c\": container with ID starting with 55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c not found: ID does not exist" containerID="55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c" Jan 28 13:13:08 crc kubenswrapper[4848]: I0128 13:13:08.978390 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c"} err="failed to get container status \"55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c\": rpc error: code = NotFound desc = could not find container \"55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c\": container with ID starting with 55c4746fd9d80c373b57e1b05f42eff77d238ea1fed99d5f7cabc23de261912c not found: ID does not exist" Jan 28 13:13:09 crc kubenswrapper[4848]: I0128 13:13:09.922716 4848 generic.go:334] "Generic (PLEG): container finished" podID="c1cb683f-398f-4145-aa62-96ecbb02e82d" containerID="4120948bb42c22570383444e750e1da6871c29ac655b0d641f6919049d472a8d" exitCode=0 Jan 28 13:13:09 crc kubenswrapper[4848]: I0128 13:13:09.922853 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" event={"ID":"c1cb683f-398f-4145-aa62-96ecbb02e82d","Type":"ContainerDied","Data":"4120948bb42c22570383444e750e1da6871c29ac655b0d641f6919049d472a8d"} Jan 28 13:13:10 crc kubenswrapper[4848]: I0128 13:13:10.866890 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" path="/var/lib/kubelet/pods/cb7d4944-168d-4dbf-a725-66f110c1566b/volumes" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.540342 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.642867 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-inventory\") pod \"c1cb683f-398f-4145-aa62-96ecbb02e82d\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.642989 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-ssh-key-openstack-edpm-ipam\") pod \"c1cb683f-398f-4145-aa62-96ecbb02e82d\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.643056 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmr8v\" (UniqueName: \"kubernetes.io/projected/c1cb683f-398f-4145-aa62-96ecbb02e82d-kube-api-access-cmr8v\") pod \"c1cb683f-398f-4145-aa62-96ecbb02e82d\" (UID: \"c1cb683f-398f-4145-aa62-96ecbb02e82d\") " Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.650401 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1cb683f-398f-4145-aa62-96ecbb02e82d-kube-api-access-cmr8v" (OuterVolumeSpecName: "kube-api-access-cmr8v") pod "c1cb683f-398f-4145-aa62-96ecbb02e82d" (UID: "c1cb683f-398f-4145-aa62-96ecbb02e82d"). InnerVolumeSpecName "kube-api-access-cmr8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.680132 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c1cb683f-398f-4145-aa62-96ecbb02e82d" (UID: "c1cb683f-398f-4145-aa62-96ecbb02e82d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.687516 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-inventory" (OuterVolumeSpecName: "inventory") pod "c1cb683f-398f-4145-aa62-96ecbb02e82d" (UID: "c1cb683f-398f-4145-aa62-96ecbb02e82d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.746486 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.746526 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c1cb683f-398f-4145-aa62-96ecbb02e82d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.746537 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmr8v\" (UniqueName: \"kubernetes.io/projected/c1cb683f-398f-4145-aa62-96ecbb02e82d-kube-api-access-cmr8v\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.950855 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" event={"ID":"c1cb683f-398f-4145-aa62-96ecbb02e82d","Type":"ContainerDied","Data":"ec031f5f6f5fe52ecf304ccc313dc18d36e7e707f780ec8fccc9211e560335c7"} Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.950915 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec031f5f6f5fe52ecf304ccc313dc18d36e7e707f780ec8fccc9211e560335c7" Jan 28 13:13:11 crc kubenswrapper[4848]: I0128 13:13:11.950955 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-w9xdz" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.108075 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9"] Jan 28 13:13:12 crc kubenswrapper[4848]: E0128 13:13:12.108884 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="extract-content" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.108910 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="extract-content" Jan 28 13:13:12 crc kubenswrapper[4848]: E0128 13:13:12.108933 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="registry-server" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.108945 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="registry-server" Jan 28 13:13:12 crc kubenswrapper[4848]: E0128 13:13:12.108978 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="extract-utilities" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.108991 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="extract-utilities" Jan 28 13:13:12 crc kubenswrapper[4848]: E0128 13:13:12.109038 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1cb683f-398f-4145-aa62-96ecbb02e82d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.109051 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1cb683f-398f-4145-aa62-96ecbb02e82d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.109453 4848 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c1cb683f-398f-4145-aa62-96ecbb02e82d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.109499 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb7d4944-168d-4dbf-a725-66f110c1566b" containerName="registry-server" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.110825 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.115476 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.115919 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.115709 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.116397 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.124350 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9"] Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.155083 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwjxs\" (UniqueName: \"kubernetes.io/projected/64b9b93d-fe00-440a-88b0-dbb5f4621be9-kube-api-access-jwjxs\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.155223 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.155307 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.155360 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.258390 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwjxs\" (UniqueName: 
\"kubernetes.io/projected/64b9b93d-fe00-440a-88b0-dbb5f4621be9-kube-api-access-jwjxs\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.258636 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.258761 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.258873 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.264701 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.264767 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.265012 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.278273 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwjxs\" (UniqueName: \"kubernetes.io/projected/64b9b93d-fe00-440a-88b0-dbb5f4621be9-kube-api-access-jwjxs\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:12 crc kubenswrapper[4848]: I0128 13:13:12.433787 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:13:13 crc kubenswrapper[4848]: I0128 13:13:13.017476 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:13 crc kubenswrapper[4848]: I0128 13:13:13.018129 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:13 crc kubenswrapper[4848]: I0128 13:13:13.084663 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:13 crc kubenswrapper[4848]: I0128 13:13:13.217479 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9"] Jan 28 13:13:13 crc kubenswrapper[4848]: I0128 13:13:13.979493 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" event={"ID":"64b9b93d-fe00-440a-88b0-dbb5f4621be9","Type":"ContainerStarted","Data":"3ee0b180a4b21aa710071d51f44621b0a761bb45be23a88186db3574cd8c349c"} Jan 28 13:13:14 crc kubenswrapper[4848]: I0128 13:13:14.002461 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" podStartSLOduration=1.547573799 podStartE2EDuration="2.002425977s" podCreationTimestamp="2026-01-28 13:13:12 +0000 UTC" firstStartedPulling="2026-01-28 13:13:13.214061846 +0000 UTC m=+1620.126278884" lastFinishedPulling="2026-01-28 13:13:13.668914004 +0000 UTC m=+1620.581131062" observedRunningTime="2026-01-28 13:13:13.996905666 +0000 UTC m=+1620.909122724" watchObservedRunningTime="2026-01-28 13:13:14.002425977 +0000 UTC m=+1620.914643015" Jan 28 13:13:14 crc kubenswrapper[4848]: I0128 13:13:14.041932 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:14 crc kubenswrapper[4848]: I0128 13:13:14.287477 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-st4dg"] Jan 28 13:13:14 crc kubenswrapper[4848]: I0128 13:13:14.992787 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" event={"ID":"64b9b93d-fe00-440a-88b0-dbb5f4621be9","Type":"ContainerStarted","Data":"4d2d83e3007b7e3eb8a4e8ef907d8253802d2f6eef604cf363c2efabcf06a436"} Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.007068 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-st4dg" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="registry-server" containerID="cri-o://cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba" gracePeriod=2 Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.544857 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.702432 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-catalog-content\") pod \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.702530 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-utilities\") pod \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.702844 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ms9rv\" (UniqueName: \"kubernetes.io/projected/ad18ff50-d577-4b5e-979b-1d7e86020f4d-kube-api-access-ms9rv\") pod \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\" (UID: \"ad18ff50-d577-4b5e-979b-1d7e86020f4d\") " Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.703583 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-utilities" (OuterVolumeSpecName: "utilities") pod "ad18ff50-d577-4b5e-979b-1d7e86020f4d" (UID: "ad18ff50-d577-4b5e-979b-1d7e86020f4d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.703909 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.712647 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad18ff50-d577-4b5e-979b-1d7e86020f4d-kube-api-access-ms9rv" (OuterVolumeSpecName: "kube-api-access-ms9rv") pod "ad18ff50-d577-4b5e-979b-1d7e86020f4d" (UID: "ad18ff50-d577-4b5e-979b-1d7e86020f4d"). InnerVolumeSpecName "kube-api-access-ms9rv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.753401 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad18ff50-d577-4b5e-979b-1d7e86020f4d" (UID: "ad18ff50-d577-4b5e-979b-1d7e86020f4d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.806262 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ms9rv\" (UniqueName: \"kubernetes.io/projected/ad18ff50-d577-4b5e-979b-1d7e86020f4d-kube-api-access-ms9rv\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:16 crc kubenswrapper[4848]: I0128 13:13:16.806312 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad18ff50-d577-4b5e-979b-1d7e86020f4d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.023724 4848 generic.go:334] "Generic (PLEG): container finished" podID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerID="cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba" exitCode=0 Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.023832 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerDied","Data":"cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba"} Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.023915 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-st4dg" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.023954 4848 scope.go:117] "RemoveContainer" containerID="cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.023930 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4dg" event={"ID":"ad18ff50-d577-4b5e-979b-1d7e86020f4d","Type":"ContainerDied","Data":"004ffa8156dbd32893b1ba8493d1f57a03aacd2983db949b8333f39e2c033dfa"} Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.065160 4848 scope.go:117] "RemoveContainer" containerID="57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.065929 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-st4dg"] Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.080545 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-st4dg"] Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.091079 4848 scope.go:117] "RemoveContainer" containerID="d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.162124 4848 scope.go:117] "RemoveContainer" containerID="cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba" Jan 28 13:13:17 crc kubenswrapper[4848]: E0128 13:13:17.163017 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba\": container with ID starting with cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba not found: ID does not exist" containerID="cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.163076 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba"} err="failed to get container status 
\"cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba\": rpc error: code = NotFound desc = could not find container \"cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba\": container with ID starting with cb1139fa6708a592da0c74057e05219a91b7dc3347decf81fef0db83170f40ba not found: ID does not exist" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.163113 4848 scope.go:117] "RemoveContainer" containerID="57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb" Jan 28 13:13:17 crc kubenswrapper[4848]: E0128 13:13:17.163857 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb\": container with ID starting with 57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb not found: ID does not exist" containerID="57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.163918 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb"} err="failed to get container status \"57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb\": rpc error: code = NotFound desc = could not find container \"57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb\": container with ID starting with 57d938bc26cf99b8d322c557376188d8f319dc8b919504e5a8f517b80d59fedb not found: ID does not exist" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.163957 4848 scope.go:117] "RemoveContainer" containerID="d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54" Jan 28 13:13:17 crc kubenswrapper[4848]: E0128 13:13:17.164632 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54\": container with ID starting with d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54 not found: ID does not exist" containerID="d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54" Jan 28 13:13:17 crc kubenswrapper[4848]: I0128 13:13:17.164667 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54"} err="failed to get container status \"d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54\": rpc error: code = NotFound desc = could not find container \"d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54\": container with ID starting with d4e48a9e22ea436e5d6c20d75e422bee05027057d0728822c27b2b43aea4bb54 not found: ID does not exist" Jan 28 13:13:18 crc kubenswrapper[4848]: I0128 13:13:18.872639 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" path="/var/lib/kubelet/pods/ad18ff50-d577-4b5e-979b-1d7e86020f4d/volumes" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.803813 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tvdz5"] Jan 28 13:13:37 crc kubenswrapper[4848]: E0128 13:13:37.804985 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="extract-utilities" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.805003 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="extract-utilities" Jan 28 13:13:37 crc kubenswrapper[4848]: E0128 13:13:37.805024 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="registry-server" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.805030 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="registry-server" Jan 28 13:13:37 crc kubenswrapper[4848]: E0128 13:13:37.805060 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="extract-content" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.805067 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="extract-content" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.805314 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad18ff50-d577-4b5e-979b-1d7e86020f4d" containerName="registry-server" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.807125 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.821740 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tvdz5"] Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.924509 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.924600 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.944038 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-catalog-content\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.944097 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-utilities\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:37 crc kubenswrapper[4848]: I0128 13:13:37.944335 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gknt\" (UniqueName: \"kubernetes.io/projected/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-kube-api-access-9gknt\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.048439 4848 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-catalog-content\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.048508 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-utilities\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.048561 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gknt\" (UniqueName: \"kubernetes.io/projected/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-kube-api-access-9gknt\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.049188 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-catalog-content\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.049509 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-utilities\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.073611 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gknt\" (UniqueName: \"kubernetes.io/projected/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-kube-api-access-9gknt\") pod \"redhat-marketplace-tvdz5\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.138434 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:38 crc kubenswrapper[4848]: I0128 13:13:38.661441 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tvdz5"] Jan 28 13:13:39 crc kubenswrapper[4848]: I0128 13:13:39.322532 4848 generic.go:334] "Generic (PLEG): container finished" podID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerID="9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42" exitCode=0 Jan 28 13:13:39 crc kubenswrapper[4848]: I0128 13:13:39.322594 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tvdz5" event={"ID":"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9","Type":"ContainerDied","Data":"9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42"} Jan 28 13:13:39 crc kubenswrapper[4848]: I0128 13:13:39.322631 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tvdz5" event={"ID":"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9","Type":"ContainerStarted","Data":"f87f9d104a37370a7fb8e574904c0d1555390fb1aa6258efda6a6cbba85e7d57"} Jan 28 13:13:41 crc kubenswrapper[4848]: I0128 13:13:41.385659 4848 generic.go:334] "Generic (PLEG): container finished" podID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerID="536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7" exitCode=0 Jan 28 13:13:41 crc kubenswrapper[4848]: I0128 13:13:41.385819 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tvdz5" event={"ID":"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9","Type":"ContainerDied","Data":"536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7"} Jan 28 13:13:42 crc kubenswrapper[4848]: I0128 13:13:42.401314 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tvdz5" event={"ID":"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9","Type":"ContainerStarted","Data":"b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106"} Jan 28 13:13:42 crc kubenswrapper[4848]: I0128 13:13:42.430989 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tvdz5" podStartSLOduration=2.9276082580000002 podStartE2EDuration="5.430965798s" podCreationTimestamp="2026-01-28 13:13:37 +0000 UTC" firstStartedPulling="2026-01-28 13:13:39.324967191 +0000 UTC m=+1646.237184229" lastFinishedPulling="2026-01-28 13:13:41.828324731 +0000 UTC m=+1648.740541769" observedRunningTime="2026-01-28 13:13:42.422501516 +0000 UTC m=+1649.334718574" watchObservedRunningTime="2026-01-28 13:13:42.430965798 +0000 UTC m=+1649.343182836" Jan 28 13:13:48 crc kubenswrapper[4848]: I0128 13:13:48.139155 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:48 crc kubenswrapper[4848]: I0128 13:13:48.139901 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:48 crc kubenswrapper[4848]: I0128 13:13:48.191572 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:48 crc kubenswrapper[4848]: I0128 13:13:48.521058 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:48 crc kubenswrapper[4848]: I0128 13:13:48.584393 4848 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/redhat-marketplace-tvdz5"] Jan 28 13:13:50 crc kubenswrapper[4848]: I0128 13:13:50.498166 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tvdz5" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="registry-server" containerID="cri-o://b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106" gracePeriod=2 Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.002278 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.190691 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-utilities\") pod \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.190914 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-catalog-content\") pod \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.190964 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gknt\" (UniqueName: \"kubernetes.io/projected/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-kube-api-access-9gknt\") pod \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\" (UID: \"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9\") " Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.192072 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-utilities" (OuterVolumeSpecName: "utilities") pod "854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" (UID: "854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.197372 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-kube-api-access-9gknt" (OuterVolumeSpecName: "kube-api-access-9gknt") pod "854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" (UID: "854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9"). InnerVolumeSpecName "kube-api-access-9gknt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.216007 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" (UID: "854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.294755 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.295408 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.295483 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gknt\" (UniqueName: \"kubernetes.io/projected/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9-kube-api-access-9gknt\") on node \"crc\" DevicePath \"\"" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.511717 4848 generic.go:334] "Generic (PLEG): container finished" podID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerID="b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106" exitCode=0 Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.511812 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tvdz5" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.511790 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tvdz5" event={"ID":"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9","Type":"ContainerDied","Data":"b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106"} Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.511974 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tvdz5" event={"ID":"854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9","Type":"ContainerDied","Data":"f87f9d104a37370a7fb8e574904c0d1555390fb1aa6258efda6a6cbba85e7d57"} Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.512005 4848 scope.go:117] "RemoveContainer" containerID="b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.545869 4848 scope.go:117] "RemoveContainer" containerID="536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.552765 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tvdz5"] Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.592805 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tvdz5"] Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.596682 4848 scope.go:117] "RemoveContainer" containerID="9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.636371 4848 scope.go:117] "RemoveContainer" containerID="b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106" Jan 28 13:13:51 crc kubenswrapper[4848]: E0128 13:13:51.637383 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106\": container with ID starting with b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106 not found: ID does not exist" containerID="b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.637434 4848 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106"} err="failed to get container status \"b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106\": rpc error: code = NotFound desc = could not find container \"b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106\": container with ID starting with b59767d0781600b1e7f42c1f029fd699c712dfb38a4222ea7e93b49e970f6106 not found: ID does not exist" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.637466 4848 scope.go:117] "RemoveContainer" containerID="536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7" Jan 28 13:13:51 crc kubenswrapper[4848]: E0128 13:13:51.639049 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7\": container with ID starting with 536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7 not found: ID does not exist" containerID="536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.639083 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7"} err="failed to get container status \"536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7\": rpc error: code = NotFound desc = could not find container \"536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7\": container with ID starting with 536bbe488774dcc5f5cb0e69cb703c10d39f00b1a813acb14d95adfc4d66d6d7 not found: ID does not exist" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.639101 4848 scope.go:117] "RemoveContainer" containerID="9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42" Jan 28 13:13:51 crc kubenswrapper[4848]: E0128 13:13:51.639360 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42\": container with ID starting with 9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42 not found: ID does not exist" containerID="9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42" Jan 28 13:13:51 crc kubenswrapper[4848]: I0128 13:13:51.639389 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42"} err="failed to get container status \"9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42\": rpc error: code = NotFound desc = could not find container \"9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42\": container with ID starting with 9aadd885b41f9f2511aab9b616f6adc05cdf35b683f145cb290b25858173dd42 not found: ID does not exist" Jan 28 13:13:52 crc kubenswrapper[4848]: I0128 13:13:52.864114 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" path="/var/lib/kubelet/pods/854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9/volumes" Jan 28 13:14:02 crc kubenswrapper[4848]: I0128 13:14:02.543395 4848 scope.go:117] "RemoveContainer" containerID="e1831ca876ba45efafdbcc3a2920a1ecea5c5b0cf626b909f3798d32419fcaba" Jan 28 13:14:07 crc kubenswrapper[4848]: I0128 13:14:07.925237 4848 patch_prober.go:28] interesting 
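The liveness failures that follow are plain HTTP probes: the kubelet issues a GET against http://127.0.0.1:8798/health and the connection is refused because nothing is listening on that port. A rough Go equivalent of that single check (the 1s timeout is an assumption for illustration; the real value comes from the probe spec, which this log does not show):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 1 * time.Second}
	// Endpoint taken from the probe output in the log below.
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// The failure mode recorded here: connect: connection refused.
		fmt.Println("probe failure:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("probe status:", resp.Status) // a 2xx/3xx status counts as success
}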
Jan 28 13:14:07 crc kubenswrapper[4848]: I0128 13:14:07.925237 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:14:07 crc kubenswrapper[4848]: I0128 13:14:07.926812 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:14:37 crc kubenswrapper[4848]: I0128 13:14:37.925026 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:14:37 crc kubenswrapper[4848]: I0128 13:14:37.925818 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:14:37 crc kubenswrapper[4848]: I0128 13:14:37.925892 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz"
Jan 28 13:14:37 crc kubenswrapper[4848]: I0128 13:14:37.927006 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 13:14:37 crc kubenswrapper[4848]: I0128 13:14:37.927083 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" gracePeriod=600
Jan 28 13:14:38 crc kubenswrapper[4848]: E0128 13:14:38.062173 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:14:39 crc kubenswrapper[4848]: I0128 13:14:39.079064 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" exitCode=0
Jan 28 13:14:39 crc kubenswrapper[4848]: I0128 13:14:39.079167 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9"}
Jan 28 13:14:39 crc kubenswrapper[4848]: I0128 13:14:39.081797 4848 scope.go:117] "RemoveContainer" containerID="f2e69a4a3785c5d66035fd792a22c202c11766e978faf8a57dcccebf228af87c"
Jan 28 13:14:39 crc kubenswrapper[4848]: I0128 13:14:39.083216 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9"
Jan 28 13:14:39 crc kubenswrapper[4848]: E0128 13:14:39.086292 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:14:53 crc kubenswrapper[4848]: I0128 13:14:53.851358 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9"
Jan 28 13:14:53 crc kubenswrapper[4848]: E0128 13:14:53.852403 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.169734 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n"]
Jan 28 13:15:00 crc kubenswrapper[4848]: E0128 13:15:00.171711 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="extract-content"
Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.171753 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="extract-content"
Jan 28 13:15:00 crc kubenswrapper[4848]: E0128 13:15:00.171799 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="extract-utilities"
Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.171821 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="extract-utilities"
Jan 28 13:15:00 crc kubenswrapper[4848]: E0128 13:15:00.171876 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="registry-server"
Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.171892 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="registry-server"
Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.172441 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="854703d2-5ae5-4d8a-b4bb-e7533d9c0ed9" containerName="registry-server"
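The recurring "back-off 5m0s" errors around this point are the kubelet's container-restart back-off sitting at its ceiling. Upstream kubelet starts the delay at 10s and doubles it per restart up to a 5m cap; treat those constants as assumptions about this build rather than values read from the log. A sketch of that schedule:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 10 * time.Second        // assumed initial back-off
	const maxDelay = 5 * time.Minute // assumed cap; matches the "back-off 5m0s" in the errors
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: wait %v\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // after roughly six restarts the pod waits the full 5m0s each time
		}
	}
}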
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.177607 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.178188 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.203440 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n"] Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.224843 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/262cf1c7-aa29-451d-b27c-8df1174110f1-config-volume\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.224912 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/262cf1c7-aa29-451d-b27c-8df1174110f1-secret-volume\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.225011 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkjhq\" (UniqueName: \"kubernetes.io/projected/262cf1c7-aa29-451d-b27c-8df1174110f1-kube-api-access-lkjhq\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.331039 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkjhq\" (UniqueName: \"kubernetes.io/projected/262cf1c7-aa29-451d-b27c-8df1174110f1-kube-api-access-lkjhq\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.331588 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/262cf1c7-aa29-451d-b27c-8df1174110f1-config-volume\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.331665 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/262cf1c7-aa29-451d-b27c-8df1174110f1-secret-volume\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.346561 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/262cf1c7-aa29-451d-b27c-8df1174110f1-config-volume\") pod 
\"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.365354 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkjhq\" (UniqueName: \"kubernetes.io/projected/262cf1c7-aa29-451d-b27c-8df1174110f1-kube-api-access-lkjhq\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.376126 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/262cf1c7-aa29-451d-b27c-8df1174110f1-secret-volume\") pod \"collect-profiles-29493435-ch48n\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:00 crc kubenswrapper[4848]: I0128 13:15:00.504834 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:01 crc kubenswrapper[4848]: I0128 13:15:01.003047 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n"] Jan 28 13:15:01 crc kubenswrapper[4848]: I0128 13:15:01.423161 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" event={"ID":"262cf1c7-aa29-451d-b27c-8df1174110f1","Type":"ContainerStarted","Data":"ccc60d17a7aa56bf24a87dc8495b8207b9aae3906a76f2a7f744e56cddf103f9"} Jan 28 13:15:01 crc kubenswrapper[4848]: I0128 13:15:01.424427 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" event={"ID":"262cf1c7-aa29-451d-b27c-8df1174110f1","Type":"ContainerStarted","Data":"18dc356e3678a909924ed1705508e8fd187ec7b0a4a8f7eafc993ef26c4549c0"} Jan 28 13:15:01 crc kubenswrapper[4848]: I0128 13:15:01.450720 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" podStartSLOduration=1.450691415 podStartE2EDuration="1.450691415s" podCreationTimestamp="2026-01-28 13:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:15:01.438415759 +0000 UTC m=+1728.350632797" watchObservedRunningTime="2026-01-28 13:15:01.450691415 +0000 UTC m=+1728.362908453" Jan 28 13:15:02 crc kubenswrapper[4848]: I0128 13:15:02.446482 4848 generic.go:334] "Generic (PLEG): container finished" podID="262cf1c7-aa29-451d-b27c-8df1174110f1" containerID="ccc60d17a7aa56bf24a87dc8495b8207b9aae3906a76f2a7f744e56cddf103f9" exitCode=0 Jan 28 13:15:02 crc kubenswrapper[4848]: I0128 13:15:02.446622 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" event={"ID":"262cf1c7-aa29-451d-b27c-8df1174110f1","Type":"ContainerDied","Data":"ccc60d17a7aa56bf24a87dc8495b8207b9aae3906a76f2a7f744e56cddf103f9"} Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.838675 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.925062 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/262cf1c7-aa29-451d-b27c-8df1174110f1-secret-volume\") pod \"262cf1c7-aa29-451d-b27c-8df1174110f1\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.925184 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/262cf1c7-aa29-451d-b27c-8df1174110f1-config-volume\") pod \"262cf1c7-aa29-451d-b27c-8df1174110f1\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.925220 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkjhq\" (UniqueName: \"kubernetes.io/projected/262cf1c7-aa29-451d-b27c-8df1174110f1-kube-api-access-lkjhq\") pod \"262cf1c7-aa29-451d-b27c-8df1174110f1\" (UID: \"262cf1c7-aa29-451d-b27c-8df1174110f1\") " Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.926458 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/262cf1c7-aa29-451d-b27c-8df1174110f1-config-volume" (OuterVolumeSpecName: "config-volume") pod "262cf1c7-aa29-451d-b27c-8df1174110f1" (UID: "262cf1c7-aa29-451d-b27c-8df1174110f1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.933081 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/262cf1c7-aa29-451d-b27c-8df1174110f1-kube-api-access-lkjhq" (OuterVolumeSpecName: "kube-api-access-lkjhq") pod "262cf1c7-aa29-451d-b27c-8df1174110f1" (UID: "262cf1c7-aa29-451d-b27c-8df1174110f1"). InnerVolumeSpecName "kube-api-access-lkjhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:15:03 crc kubenswrapper[4848]: I0128 13:15:03.937923 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/262cf1c7-aa29-451d-b27c-8df1174110f1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "262cf1c7-aa29-451d-b27c-8df1174110f1" (UID: "262cf1c7-aa29-451d-b27c-8df1174110f1"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.027201 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/262cf1c7-aa29-451d-b27c-8df1174110f1-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.027240 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/262cf1c7-aa29-451d-b27c-8df1174110f1-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.027278 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkjhq\" (UniqueName: \"kubernetes.io/projected/262cf1c7-aa29-451d-b27c-8df1174110f1-kube-api-access-lkjhq\") on node \"crc\" DevicePath \"\"" Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.477783 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" event={"ID":"262cf1c7-aa29-451d-b27c-8df1174110f1","Type":"ContainerDied","Data":"18dc356e3678a909924ed1705508e8fd187ec7b0a4a8f7eafc993ef26c4549c0"} Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.477827 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18dc356e3678a909924ed1705508e8fd187ec7b0a4a8f7eafc993ef26c4549c0" Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.477876 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n" Jan 28 13:15:04 crc kubenswrapper[4848]: I0128 13:15:04.858939 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:15:04 crc kubenswrapper[4848]: E0128 13:15:04.860968 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:15:17 crc kubenswrapper[4848]: I0128 13:15:17.851109 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:15:17 crc kubenswrapper[4848]: E0128 13:15:17.852404 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:15:28 crc kubenswrapper[4848]: I0128 13:15:28.850408 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:15:28 crc kubenswrapper[4848]: E0128 13:15:28.851541 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:15:40 crc kubenswrapper[4848]: I0128 13:15:40.849793 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:15:40 crc kubenswrapper[4848]: E0128 13:15:40.850958 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:15:51 crc kubenswrapper[4848]: I0128 13:15:51.850167 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:15:51 crc kubenswrapper[4848]: E0128 13:15:51.851126 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:16:02 crc kubenswrapper[4848]: I0128 13:16:02.701016 4848 scope.go:117] "RemoveContainer" containerID="34d88ca7447387fa85383c08aa3b97091dadd666344f329ef1871c43238d4a95" Jan 28 13:16:02 crc kubenswrapper[4848]: I0128 13:16:02.737671 4848 scope.go:117] "RemoveContainer" containerID="ff6bcd020e1d5b86640179b0cf8c475ec083f755cb9aefff9fee43ff95ff1da2" Jan 28 13:16:02 crc kubenswrapper[4848]: I0128 13:16:02.767119 4848 scope.go:117] "RemoveContainer" containerID="a129e1f80e2f4594d9eb1de7a451a1b981081b1263d8f3a82c215ab2910073b8" Jan 28 13:16:02 crc kubenswrapper[4848]: I0128 13:16:02.803200 4848 scope.go:117] "RemoveContainer" containerID="c4f6ae1377600fc12b1953698d273ef9327508721256a58a7f4f2a6d66948e14" Jan 28 13:16:05 crc kubenswrapper[4848]: I0128 13:16:05.850580 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:16:05 crc kubenswrapper[4848]: E0128 13:16:05.851466 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:16:20 crc kubenswrapper[4848]: I0128 13:16:20.849853 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:16:20 crc kubenswrapper[4848]: E0128 13:16:20.850987 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:16:21 crc 
kubenswrapper[4848]: I0128 13:16:21.431206 4848 generic.go:334] "Generic (PLEG): container finished" podID="64b9b93d-fe00-440a-88b0-dbb5f4621be9" containerID="4d2d83e3007b7e3eb8a4e8ef907d8253802d2f6eef604cf363c2efabcf06a436" exitCode=0 Jan 28 13:16:21 crc kubenswrapper[4848]: I0128 13:16:21.431306 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" event={"ID":"64b9b93d-fe00-440a-88b0-dbb5f4621be9","Type":"ContainerDied","Data":"4d2d83e3007b7e3eb8a4e8ef907d8253802d2f6eef604cf363c2efabcf06a436"} Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.025822 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.060454 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9328-account-create-update-8vsqt"] Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.081989 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwjxs\" (UniqueName: \"kubernetes.io/projected/64b9b93d-fe00-440a-88b0-dbb5f4621be9-kube-api-access-jwjxs\") pod \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.082167 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-inventory\") pod \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.082375 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-ssh-key-openstack-edpm-ipam\") pod \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.082544 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-bootstrap-combined-ca-bundle\") pod \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\" (UID: \"64b9b93d-fe00-440a-88b0-dbb5f4621be9\") " Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.092606 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "64b9b93d-fe00-440a-88b0-dbb5f4621be9" (UID: "64b9b93d-fe00-440a-88b0-dbb5f4621be9"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.093945 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64b9b93d-fe00-440a-88b0-dbb5f4621be9-kube-api-access-jwjxs" (OuterVolumeSpecName: "kube-api-access-jwjxs") pod "64b9b93d-fe00-440a-88b0-dbb5f4621be9" (UID: "64b9b93d-fe00-440a-88b0-dbb5f4621be9"). InnerVolumeSpecName "kube-api-access-jwjxs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.107692 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-j2whm"] Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.122260 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-inventory" (OuterVolumeSpecName: "inventory") pod "64b9b93d-fe00-440a-88b0-dbb5f4621be9" (UID: "64b9b93d-fe00-440a-88b0-dbb5f4621be9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.125925 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "64b9b93d-fe00-440a-88b0-dbb5f4621be9" (UID: "64b9b93d-fe00-440a-88b0-dbb5f4621be9"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.126956 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-9328-account-create-update-8vsqt"] Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.150123 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-j2whm"] Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.187330 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.187374 4848 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.187397 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwjxs\" (UniqueName: \"kubernetes.io/projected/64b9b93d-fe00-440a-88b0-dbb5f4621be9-kube-api-access-jwjxs\") on node \"crc\" DevicePath \"\"" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.187417 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64b9b93d-fe00-440a-88b0-dbb5f4621be9-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.464802 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" event={"ID":"64b9b93d-fe00-440a-88b0-dbb5f4621be9","Type":"ContainerDied","Data":"3ee0b180a4b21aa710071d51f44621b0a761bb45be23a88186db3574cd8c349c"} Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.464875 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ee0b180a4b21aa710071d51f44621b0a761bb45be23a88186db3574cd8c349c" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.464876 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.592656 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6"] Jan 28 13:16:23 crc kubenswrapper[4848]: E0128 13:16:23.593141 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="262cf1c7-aa29-451d-b27c-8df1174110f1" containerName="collect-profiles" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.593162 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="262cf1c7-aa29-451d-b27c-8df1174110f1" containerName="collect-profiles" Jan 28 13:16:23 crc kubenswrapper[4848]: E0128 13:16:23.593194 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b9b93d-fe00-440a-88b0-dbb5f4621be9" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.593203 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b9b93d-fe00-440a-88b0-dbb5f4621be9" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.593404 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b9b93d-fe00-440a-88b0-dbb5f4621be9" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.593434 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="262cf1c7-aa29-451d-b27c-8df1174110f1" containerName="collect-profiles" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.594176 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.599551 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.602470 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.602569 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.602756 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.621357 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6"] Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.703364 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.703800 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: 
\"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.704208 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtfmq\" (UniqueName: \"kubernetes.io/projected/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-kube-api-access-xtfmq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.806263 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.806895 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtfmq\" (UniqueName: \"kubernetes.io/projected/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-kube-api-access-xtfmq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.807052 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.812376 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.812473 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.831770 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtfmq\" (UniqueName: \"kubernetes.io/projected/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-kube-api-access-xtfmq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:23 crc kubenswrapper[4848]: I0128 13:16:23.920040 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.082541 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-4ada-account-create-update-nj927"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.103120 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-mrvkg"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.120015 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-mrvkg"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.135959 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-4ada-account-create-update-nj927"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.158719 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-zk4jd"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.169331 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-zk4jd"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.547236 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6"] Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.555720 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.862777 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1147d1b1-a4fb-4bf5-ba1a-282cae5206d3" path="/var/lib/kubelet/pods/1147d1b1-a4fb-4bf5-ba1a-282cae5206d3/volumes" Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.863745 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17c614c4-a02a-4224-9ee5-b334451f0671" path="/var/lib/kubelet/pods/17c614c4-a02a-4224-9ee5-b334451f0671/volumes" Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.864356 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39291113-86f7-4ddb-a219-f67ba93d35cb" path="/var/lib/kubelet/pods/39291113-86f7-4ddb-a219-f67ba93d35cb/volumes" Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.864896 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d" path="/var/lib/kubelet/pods/b2dbef0d-9acf-4c11-8634-d0d8e0f88f5d/volumes" Jan 28 13:16:24 crc kubenswrapper[4848]: I0128 13:16:24.866232 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef583ead-2fa2-4c76-9753-2dc35141fdea" path="/var/lib/kubelet/pods/ef583ead-2fa2-4c76-9753-2dc35141fdea/volumes" Jan 28 13:16:25 crc kubenswrapper[4848]: I0128 13:16:25.040156 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-c692-account-create-update-9lnsr"] Jan 28 13:16:25 crc kubenswrapper[4848]: I0128 13:16:25.053992 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-c692-account-create-update-9lnsr"] Jan 28 13:16:25 crc kubenswrapper[4848]: I0128 13:16:25.489970 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" event={"ID":"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b","Type":"ContainerStarted","Data":"429c28a35c3f36f32c8b98617c957efaa4eb0c6e173bb275330e50d0cb9b5f75"} Jan 28 13:16:25 crc kubenswrapper[4848]: I0128 13:16:25.490546 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" event={"ID":"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b","Type":"ContainerStarted","Data":"9fbf9afd982f59eaaf9a6a6b3ff483f7f53ca5a2ef5050bdb705db3f28ba997f"} Jan 28 13:16:25 crc kubenswrapper[4848]: I0128 13:16:25.515214 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" podStartSLOduration=1.992015006 podStartE2EDuration="2.515189412s" podCreationTimestamp="2026-01-28 13:16:23 +0000 UTC" firstStartedPulling="2026-01-28 13:16:24.555490289 +0000 UTC m=+1811.467707327" lastFinishedPulling="2026-01-28 13:16:25.078664685 +0000 UTC m=+1811.990881733" observedRunningTime="2026-01-28 13:16:25.509957219 +0000 UTC m=+1812.422174287" watchObservedRunningTime="2026-01-28 13:16:25.515189412 +0000 UTC m=+1812.427406450" Jan 28 13:16:26 crc kubenswrapper[4848]: I0128 13:16:26.868506 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82066dd7-1c9c-4edf-b7f9-86eac39cb7ed" path="/var/lib/kubelet/pods/82066dd7-1c9c-4edf-b7f9-86eac39cb7ed/volumes" Jan 28 13:16:33 crc kubenswrapper[4848]: I0128 13:16:33.851009 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:16:33 crc kubenswrapper[4848]: E0128 13:16:33.852148 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:16:37 crc kubenswrapper[4848]: I0128 13:16:37.042662 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-cvcts"] Jan 28 13:16:37 crc kubenswrapper[4848]: I0128 13:16:37.058837 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-cvcts"] Jan 28 13:16:38 crc kubenswrapper[4848]: I0128 13:16:38.869357 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="576845fc-5259-4d19-be49-02ef9575eeb5" path="/var/lib/kubelet/pods/576845fc-5259-4d19-be49-02ef9575eeb5/volumes" Jan 28 13:16:44 crc kubenswrapper[4848]: I0128 13:16:44.858032 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:16:44 crc kubenswrapper[4848]: E0128 13:16:44.859235 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:16:57 crc kubenswrapper[4848]: I0128 13:16:57.851798 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:16:57 crc kubenswrapper[4848]: E0128 13:16:57.852665 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.097641 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-6a7f-account-create-update-t87b7"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.118903 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f8f8-account-create-update-58wd2"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.132394 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-gk6gn"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.144766 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-k59cb"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.153973 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-6a7f-account-create-update-t87b7"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.163683 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-f8f8-account-create-update-58wd2"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.178529 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-gk6gn"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.208485 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-k59cb"]
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.866164 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12b5cee1-1fa4-493c-8888-92e58d63e28e" path="/var/lib/kubelet/pods/12b5cee1-1fa4-493c-8888-92e58d63e28e/volumes"
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.867504 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c572357-66c7-4bf0-b000-4881dca67248" path="/var/lib/kubelet/pods/1c572357-66c7-4bf0-b000-4881dca67248/volumes"
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.868319 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c87c4cea-2693-4000-8635-1fcc694ead7c" path="/var/lib/kubelet/pods/c87c4cea-2693-4000-8635-1fcc694ead7c/volumes"
Jan 28 13:17:00 crc kubenswrapper[4848]: I0128 13:17:00.869078 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f18cf42c-0012-44b5-8fc3-697ff0dc8099" path="/var/lib/kubelet/pods/f18cf42c-0012-44b5-8fc3-697ff0dc8099/volumes"
Jan 28 13:17:02 crc kubenswrapper[4848]: I0128 13:17:02.872322 4848 scope.go:117] "RemoveContainer" containerID="4e34736f998da61a923cfa47f535810b95340121c3a3127e7721dc875b77a9fa"
Jan 28 13:17:02 crc kubenswrapper[4848]: I0128 13:17:02.903795 4848 scope.go:117] "RemoveContainer" containerID="5eac4dd96fb786c6328553b247219cd5bfaf69e43db6e6d2d650d107c4730ccb"
Jan 28 13:17:02 crc kubenswrapper[4848]: I0128 13:17:02.956844 4848 scope.go:117] "RemoveContainer" containerID="015096506fe84fd94a5f4336b9ca7b958354133967fed931b410f66a04a59a41"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.010715 4848 scope.go:117] "RemoveContainer" containerID="66ee4cabd88ee13dfd685769c434a74dd50af053b3c616a8fe89511d3aed06f9"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.054789 4848 scope.go:117] "RemoveContainer" containerID="dfaae6c353997593f6249e80d22b036affd390ab264003abbdb4d81afffe9569"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.113780 4848 scope.go:117] "RemoveContainer" containerID="bcb830c61618da838f91e8173082a00a0d0e8264af4f5fa66b9d9e14f36a4aaa"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.168244 4848 scope.go:117] "RemoveContainer" containerID="cb38b7e52f59c0c15c68c1a5b20aadda1ffc67ce2a94107da8ea6b1e8aa07d06"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.209506 4848 scope.go:117] "RemoveContainer" containerID="c82da1fb8e51d3048fc7a160a77b16bb68479aa101185dfb0d86e6e4893c1d41"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.242018 4848 scope.go:117] "RemoveContainer" containerID="94056acd0176ebd5055fd650bb7d5ef005e92c59de8073cafe0eb28b375696f0"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.270985 4848 scope.go:117] "RemoveContainer" containerID="30434e89244ba3643953051b44a8ba55bf8188aca26b919d6708604913af95b3"
Jan 28 13:17:03 crc kubenswrapper[4848]: I0128 13:17:03.344961 4848 scope.go:117] "RemoveContainer" containerID="04e9d70df11c0d2df711aadbff7a1fd5738fee22f9e4f57db8c3280ff13b8ab7"
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.053944 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-8283-account-create-update-6jh6j"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.073154 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-h859v"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.085548 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-ffc8h"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.096110 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-a957-account-create-update-wln2w"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.107151 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-8283-account-create-update-6jh6j"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.117359 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-h859v"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.128378 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-ffc8h"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.137678 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-a957-account-create-update-wln2w"]
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.870824 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cce9c77-ae44-4fa5-b025-d1b76d14c352" path="/var/lib/kubelet/pods/8cce9c77-ae44-4fa5-b025-d1b76d14c352/volumes"
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.872462 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae3018da-2942-415c-9f0e-c82ce76ecdfd" path="/var/lib/kubelet/pods/ae3018da-2942-415c-9f0e-c82ce76ecdfd/volumes"
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.873151 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5e91e63-da82-4c26-a0b7-1ab2f9b45396" path="/var/lib/kubelet/pods/b5e91e63-da82-4c26-a0b7-1ab2f9b45396/volumes"
Jan 28 13:17:10 crc kubenswrapper[4848]: I0128 13:17:10.873911 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d414e9e0-a933-4e8d-b7c8-3a34a145aa9f" path="/var/lib/kubelet/pods/d414e9e0-a933-4e8d-b7c8-3a34a145aa9f/volumes"
Jan 28 13:17:11 crc kubenswrapper[4848]: I0128 13:17:11.053743 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-5pkv8"]
Jan 28 13:17:11 crc kubenswrapper[4848]: I0128 13:17:11.067333 4848
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-5pkv8"] Jan 28 13:17:11 crc kubenswrapper[4848]: I0128 13:17:11.851137 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:17:11 crc kubenswrapper[4848]: E0128 13:17:11.851511 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:17:12 crc kubenswrapper[4848]: I0128 13:17:12.864852 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc9233f9-ae7a-46f8-bec3-97aa6db5e525" path="/var/lib/kubelet/pods/dc9233f9-ae7a-46f8-bec3-97aa6db5e525/volumes" Jan 28 13:17:18 crc kubenswrapper[4848]: I0128 13:17:18.065040 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-qhcv6"] Jan 28 13:17:18 crc kubenswrapper[4848]: I0128 13:17:18.077051 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-qhcv6"] Jan 28 13:17:18 crc kubenswrapper[4848]: I0128 13:17:18.864026 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08c6f464-d9ea-4ced-bfd0-498fcca6e0c7" path="/var/lib/kubelet/pods/08c6f464-d9ea-4ced-bfd0-498fcca6e0c7/volumes" Jan 28 13:17:22 crc kubenswrapper[4848]: I0128 13:17:22.851104 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:17:22 crc kubenswrapper[4848]: E0128 13:17:22.852341 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:17:34 crc kubenswrapper[4848]: I0128 13:17:34.858064 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:17:34 crc kubenswrapper[4848]: E0128 13:17:34.859086 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:17:47 crc kubenswrapper[4848]: I0128 13:17:47.850618 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:17:47 crc kubenswrapper[4848]: E0128 13:17:47.851921 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" 
podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:17:56 crc kubenswrapper[4848]: I0128 13:17:56.821902 4848 generic.go:334] "Generic (PLEG): container finished" podID="5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" containerID="429c28a35c3f36f32c8b98617c957efaa4eb0c6e173bb275330e50d0cb9b5f75" exitCode=0 Jan 28 13:17:56 crc kubenswrapper[4848]: I0128 13:17:56.822056 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" event={"ID":"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b","Type":"ContainerDied","Data":"429c28a35c3f36f32c8b98617c957efaa4eb0c6e173bb275330e50d0cb9b5f75"} Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.088990 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-tfd9p"] Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.104237 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-tfd9p"] Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.513966 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.570763 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-ssh-key-openstack-edpm-ipam\") pod \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.571304 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtfmq\" (UniqueName: \"kubernetes.io/projected/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-kube-api-access-xtfmq\") pod \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.571406 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-inventory\") pod \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\" (UID: \"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b\") " Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.582324 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-kube-api-access-xtfmq" (OuterVolumeSpecName: "kube-api-access-xtfmq") pod "5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" (UID: "5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b"). InnerVolumeSpecName "kube-api-access-xtfmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.614418 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-inventory" (OuterVolumeSpecName: "inventory") pod "5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" (UID: "5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.619316 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" (UID: "5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b"). 
InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.679423 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtfmq\" (UniqueName: \"kubernetes.io/projected/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-kube-api-access-xtfmq\") on node \"crc\" DevicePath \"\"" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.679470 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.679481 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.845644 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" event={"ID":"5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b","Type":"ContainerDied","Data":"9fbf9afd982f59eaaf9a6a6b3ff483f7f53ca5a2ef5050bdb705db3f28ba997f"} Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.846025 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9fbf9afd982f59eaaf9a6a6b3ff483f7f53ca5a2ef5050bdb705db3f28ba997f" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.845721 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.852174 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:17:58 crc kubenswrapper[4848]: E0128 13:17:58.852500 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.864040 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f39c6ec-6d59-43de-baef-a3d680b5163f" path="/var/lib/kubelet/pods/4f39c6ec-6d59-43de-baef-a3d680b5163f/volumes" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.974381 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc"] Jan 28 13:17:58 crc kubenswrapper[4848]: E0128 13:17:58.975071 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.975102 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.975405 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 13:17:58 crc 
kubenswrapper[4848]: I0128 13:17:58.977090 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.980711 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.980946 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.981090 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.981831 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.990063 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc"] Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.990310 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.990374 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfll7\" (UniqueName: \"kubernetes.io/projected/354c2496-37a2-4d9c-9439-42b042ca2639-kube-api-access-hfll7\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:58 crc kubenswrapper[4848]: I0128 13:17:58.990514 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.093166 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.093270 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfll7\" (UniqueName: \"kubernetes.io/projected/354c2496-37a2-4d9c-9439-42b042ca2639-kube-api-access-hfll7\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.093396 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.099418 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.099808 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.121437 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfll7\" (UniqueName: \"kubernetes.io/projected/354c2496-37a2-4d9c-9439-42b042ca2639-kube-api-access-hfll7\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.298377 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:17:59 crc kubenswrapper[4848]: I0128 13:17:59.978933 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc"] Jan 28 13:18:00 crc kubenswrapper[4848]: I0128 13:18:00.869995 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" event={"ID":"354c2496-37a2-4d9c-9439-42b042ca2639","Type":"ContainerStarted","Data":"486428cf6f4f6a83e6c71722677bdc3584ffeb46a50786019b722ffb049e7303"} Jan 28 13:18:01 crc kubenswrapper[4848]: I0128 13:18:01.894311 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" event={"ID":"354c2496-37a2-4d9c-9439-42b042ca2639","Type":"ContainerStarted","Data":"7db802cce0da7a284130ac92dbb4b91d9265ba70ea697b0354879ac5989d520e"} Jan 28 13:18:01 crc kubenswrapper[4848]: I0128 13:18:01.920111 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" podStartSLOduration=3.296048909 podStartE2EDuration="3.92008948s" podCreationTimestamp="2026-01-28 13:17:58 +0000 UTC" firstStartedPulling="2026-01-28 13:17:59.984827368 +0000 UTC m=+1906.897044406" lastFinishedPulling="2026-01-28 13:18:00.608867949 +0000 UTC m=+1907.521084977" observedRunningTime="2026-01-28 13:18:01.912525724 +0000 UTC m=+1908.824742792" watchObservedRunningTime="2026-01-28 13:18:01.92008948 +0000 UTC m=+1908.832306518" Jan 28 13:18:03 crc kubenswrapper[4848]: I0128 13:18:03.722124 4848 scope.go:117] "RemoveContainer" containerID="d604957561136c62c92d570ae008387644ae6fd05288d6db20f69d27d0aa5671" Jan 28 13:18:03 crc kubenswrapper[4848]: I0128 13:18:03.764008 4848 scope.go:117] "RemoveContainer" containerID="96d971ff5f17c4ddca589ef1a8589ab52ad6d71e721a8cd230b7b7c78b96b320" Jan 28 13:18:03 crc kubenswrapper[4848]: I0128 13:18:03.820469 4848 scope.go:117] "RemoveContainer" containerID="2d2cb277ec4b7229eb89dfe85648203239f2bb02d4f98cb7321978d182742ac7" Jan 28 13:18:03 crc kubenswrapper[4848]: I0128 13:18:03.868826 4848 scope.go:117] "RemoveContainer" containerID="a5ec1b515bd1631c7a555d34f8b8848f9a1f2338e2d00eda2558e401569d2132" Jan 28 13:18:03 crc kubenswrapper[4848]: I0128 13:18:03.927651 4848 scope.go:117] "RemoveContainer" containerID="e157395a9e088032fe378f93954f217a8dcebf89352c0ad26608856fb015c7c3" Jan 28 13:18:04 crc kubenswrapper[4848]: I0128 13:18:04.016424 4848 scope.go:117] "RemoveContainer" containerID="0463f9dd2ae519a48957286e11f663a4bd8c8751fec3cb6ec2c6467847809818" Jan 28 13:18:04 crc kubenswrapper[4848]: I0128 13:18:04.043907 4848 scope.go:117] "RemoveContainer" containerID="7e61758ab3cc03fb128970cfbe4a697455b6432ebb9504212a0242a33e107169" Jan 28 13:18:10 crc kubenswrapper[4848]: I0128 13:18:10.052239 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-x5jfk"] Jan 28 13:18:10 crc kubenswrapper[4848]: I0128 13:18:10.066608 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-x6pft"] Jan 28 13:18:10 crc kubenswrapper[4848]: I0128 13:18:10.081287 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-x5jfk"] Jan 28 13:18:10 crc kubenswrapper[4848]: I0128 13:18:10.093686 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-x6pft"] Jan 28 13:18:10 crc 
kubenswrapper[4848]: I0128 13:18:10.876697 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88dde3f6-891e-49d7-a24c-575d166ec790" path="/var/lib/kubelet/pods/88dde3f6-891e-49d7-a24c-575d166ec790/volumes" Jan 28 13:18:10 crc kubenswrapper[4848]: I0128 13:18:10.877904 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5436ced-61f3-4be7-ac99-690c2b58939d" path="/var/lib/kubelet/pods/a5436ced-61f3-4be7-ac99-690c2b58939d/volumes" Jan 28 13:18:13 crc kubenswrapper[4848]: I0128 13:18:13.850890 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:18:13 crc kubenswrapper[4848]: E0128 13:18:13.852209 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:18:20 crc kubenswrapper[4848]: I0128 13:18:20.053310 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-skhpx"] Jan 28 13:18:20 crc kubenswrapper[4848]: I0128 13:18:20.071674 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-skhpx"] Jan 28 13:18:20 crc kubenswrapper[4848]: I0128 13:18:20.867747 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7100632-3157-40c8-9f9f-a47fcd756ca5" path="/var/lib/kubelet/pods/a7100632-3157-40c8-9f9f-a47fcd756ca5/volumes" Jan 28 13:18:22 crc kubenswrapper[4848]: I0128 13:18:22.049763 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-nhdf9"] Jan 28 13:18:22 crc kubenswrapper[4848]: I0128 13:18:22.064996 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-nhdf9"] Jan 28 13:18:22 crc kubenswrapper[4848]: I0128 13:18:22.869599 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9967e9e-d256-4645-be9b-3f3789db9f05" path="/var/lib/kubelet/pods/c9967e9e-d256-4645-be9b-3f3789db9f05/volumes" Jan 28 13:18:23 crc kubenswrapper[4848]: I0128 13:18:23.033671 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-pdmlr"] Jan 28 13:18:23 crc kubenswrapper[4848]: I0128 13:18:23.041952 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-pdmlr"] Jan 28 13:18:24 crc kubenswrapper[4848]: I0128 13:18:24.864652 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceb3076d-8232-44f5-8184-d727ef5c2943" path="/var/lib/kubelet/pods/ceb3076d-8232-44f5-8184-d727ef5c2943/volumes" Jan 28 13:18:26 crc kubenswrapper[4848]: I0128 13:18:26.850907 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:18:26 crc kubenswrapper[4848]: E0128 13:18:26.851841 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:18:39 crc kubenswrapper[4848]: 
I0128 13:18:39.851719 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:18:39 crc kubenswrapper[4848]: E0128 13:18:39.853162 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:18:54 crc kubenswrapper[4848]: I0128 13:18:54.859173 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:18:54 crc kubenswrapper[4848]: E0128 13:18:54.860526 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:19:04 crc kubenswrapper[4848]: I0128 13:19:04.255907 4848 scope.go:117] "RemoveContainer" containerID="7900f694b535726a65008368f978830ffccacd4c59b079d1ca8de394b21b1a75" Jan 28 13:19:04 crc kubenswrapper[4848]: I0128 13:19:04.307905 4848 scope.go:117] "RemoveContainer" containerID="111d97c73c4dca61f6d1fe05e1666b02b7e45f4e5913df0033c11304d7d91529" Jan 28 13:19:04 crc kubenswrapper[4848]: I0128 13:19:04.354887 4848 scope.go:117] "RemoveContainer" containerID="97e52eab9d0acfa4667a46149584107f8e4a28dee028ff36cba94db1a32b055d" Jan 28 13:19:04 crc kubenswrapper[4848]: I0128 13:19:04.433302 4848 scope.go:117] "RemoveContainer" containerID="27ab91dc319aa5db65d5a398ca8f1941ecf0a581aa4710d2f4dfca8152889cc4" Jan 28 13:19:04 crc kubenswrapper[4848]: I0128 13:19:04.482902 4848 scope.go:117] "RemoveContainer" containerID="a16564a15e676ed80deeabd813eae3942cd683b1f1690fbbf131e6855a92a9ae" Jan 28 13:19:07 crc kubenswrapper[4848]: I0128 13:19:07.851210 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:19:07 crc kubenswrapper[4848]: E0128 13:19:07.852621 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:19:11 crc kubenswrapper[4848]: I0128 13:19:11.803404 4848 generic.go:334] "Generic (PLEG): container finished" podID="354c2496-37a2-4d9c-9439-42b042ca2639" containerID="7db802cce0da7a284130ac92dbb4b91d9265ba70ea697b0354879ac5989d520e" exitCode=0 Jan 28 13:19:11 crc kubenswrapper[4848]: I0128 13:19:11.803504 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" event={"ID":"354c2496-37a2-4d9c-9439-42b042ca2639","Type":"ContainerDied","Data":"7db802cce0da7a284130ac92dbb4b91d9265ba70ea697b0354879ac5989d520e"} Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.344425 4848 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.488524 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-inventory\") pod \"354c2496-37a2-4d9c-9439-42b042ca2639\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.488658 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfll7\" (UniqueName: \"kubernetes.io/projected/354c2496-37a2-4d9c-9439-42b042ca2639-kube-api-access-hfll7\") pod \"354c2496-37a2-4d9c-9439-42b042ca2639\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.488960 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-ssh-key-openstack-edpm-ipam\") pod \"354c2496-37a2-4d9c-9439-42b042ca2639\" (UID: \"354c2496-37a2-4d9c-9439-42b042ca2639\") " Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.496645 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/354c2496-37a2-4d9c-9439-42b042ca2639-kube-api-access-hfll7" (OuterVolumeSpecName: "kube-api-access-hfll7") pod "354c2496-37a2-4d9c-9439-42b042ca2639" (UID: "354c2496-37a2-4d9c-9439-42b042ca2639"). InnerVolumeSpecName "kube-api-access-hfll7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.528041 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-inventory" (OuterVolumeSpecName: "inventory") pod "354c2496-37a2-4d9c-9439-42b042ca2639" (UID: "354c2496-37a2-4d9c-9439-42b042ca2639"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.529180 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "354c2496-37a2-4d9c-9439-42b042ca2639" (UID: "354c2496-37a2-4d9c-9439-42b042ca2639"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.591806 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.591864 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/354c2496-37a2-4d9c-9439-42b042ca2639-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.591873 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfll7\" (UniqueName: \"kubernetes.io/projected/354c2496-37a2-4d9c-9439-42b042ca2639-kube-api-access-hfll7\") on node \"crc\" DevicePath \"\"" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.828935 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" event={"ID":"354c2496-37a2-4d9c-9439-42b042ca2639","Type":"ContainerDied","Data":"486428cf6f4f6a83e6c71722677bdc3584ffeb46a50786019b722ffb049e7303"} Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.829027 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="486428cf6f4f6a83e6c71722677bdc3584ffeb46a50786019b722ffb049e7303" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.829207 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.943273 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6"] Jan 28 13:19:13 crc kubenswrapper[4848]: E0128 13:19:13.944065 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354c2496-37a2-4d9c-9439-42b042ca2639" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.944093 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="354c2496-37a2-4d9c-9439-42b042ca2639" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.944334 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="354c2496-37a2-4d9c-9439-42b042ca2639" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.945329 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.948279 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.948541 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.948784 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.950846 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:19:13 crc kubenswrapper[4848]: I0128 13:19:13.962884 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6"] Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.104647 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r792\" (UniqueName: \"kubernetes.io/projected/c4b08279-fe00-4688-8202-88df5280da09-kube-api-access-5r792\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.104749 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.104850 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.208049 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r792\" (UniqueName: \"kubernetes.io/projected/c4b08279-fe00-4688-8202-88df5280da09-kube-api-access-5r792\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.208143 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.209181 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.215316 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.223155 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.226598 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r792\" (UniqueName: \"kubernetes.io/projected/c4b08279-fe00-4688-8202-88df5280da09-kube-api-access-5r792\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-67bz6\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.277834 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:14 crc kubenswrapper[4848]: I0128 13:19:14.903399 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6"] Jan 28 13:19:15 crc kubenswrapper[4848]: I0128 13:19:15.865241 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" event={"ID":"c4b08279-fe00-4688-8202-88df5280da09","Type":"ContainerStarted","Data":"ece1201d261bcb077eb176de801d1edd81883fba12fc6aab3519e0f6bc4f012a"} Jan 28 13:19:16 crc kubenswrapper[4848]: I0128 13:19:16.681903 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:19:17 crc kubenswrapper[4848]: I0128 13:19:17.913560 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" event={"ID":"c4b08279-fe00-4688-8202-88df5280da09","Type":"ContainerStarted","Data":"03e220229327859848eae5522da89e0b482c8c71c227ace764ab1e948c9d3dc1"} Jan 28 13:19:17 crc kubenswrapper[4848]: I0128 13:19:17.963978 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" podStartSLOduration=3.205293831 podStartE2EDuration="4.963951129s" podCreationTimestamp="2026-01-28 13:19:13 +0000 UTC" firstStartedPulling="2026-01-28 13:19:14.920414462 +0000 UTC m=+1981.832631500" lastFinishedPulling="2026-01-28 13:19:16.67907176 +0000 UTC m=+1983.591288798" observedRunningTime="2026-01-28 13:19:17.955006584 +0000 UTC m=+1984.867223632" watchObservedRunningTime="2026-01-28 13:19:17.963951129 +0000 UTC m=+1984.876168177" Jan 28 13:19:19 
crc kubenswrapper[4848]: I0128 13:19:19.053412 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-6g8rf"] Jan 28 13:19:19 crc kubenswrapper[4848]: I0128 13:19:19.062373 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-xvrs4"] Jan 28 13:19:19 crc kubenswrapper[4848]: I0128 13:19:19.071914 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b3ce-account-create-update-vpck6"] Jan 28 13:19:19 crc kubenswrapper[4848]: I0128 13:19:19.089194 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-6g8rf"] Jan 28 13:19:19 crc kubenswrapper[4848]: I0128 13:19:19.099374 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-xvrs4"] Jan 28 13:19:19 crc kubenswrapper[4848]: I0128 13:19:19.110419 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b3ce-account-create-update-vpck6"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.031812 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-15f5-account-create-update-rd864"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.050404 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-15f5-account-create-update-rd864"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.066824 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0dd7-account-create-update-hbr27"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.080208 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-pkbf5"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.093773 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-0dd7-account-create-update-hbr27"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.106126 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-pkbf5"] Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.865564 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31edbc2f-7790-429e-a6b0-6f87c88ae72f" path="/var/lib/kubelet/pods/31edbc2f-7790-429e-a6b0-6f87c88ae72f/volumes" Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.867309 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b57b9b0-bb70-43d2-a97f-7e0372e3971c" path="/var/lib/kubelet/pods/3b57b9b0-bb70-43d2-a97f-7e0372e3971c/volumes" Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.868050 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4863c1ad-76a8-4892-a664-c7deee6ed995" path="/var/lib/kubelet/pods/4863c1ad-76a8-4892-a664-c7deee6ed995/volumes" Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.868704 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81c457b6-7dbc-41b5-ba65-227b2bc7492f" path="/var/lib/kubelet/pods/81c457b6-7dbc-41b5-ba65-227b2bc7492f/volumes" Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.869918 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c11380b3-750f-4d67-89de-5449903ecba9" path="/var/lib/kubelet/pods/c11380b3-750f-4d67-89de-5449903ecba9/volumes" Jan 28 13:19:20 crc kubenswrapper[4848]: I0128 13:19:20.870552 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9a381e2-822b-4682-9b0a-602997cf8a74" path="/var/lib/kubelet/pods/c9a381e2-822b-4682-9b0a-602997cf8a74/volumes" Jan 28 13:19:22 crc 
kubenswrapper[4848]: I0128 13:19:22.851893 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:19:22 crc kubenswrapper[4848]: E0128 13:19:22.852705 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:19:22 crc kubenswrapper[4848]: I0128 13:19:22.977985 4848 generic.go:334] "Generic (PLEG): container finished" podID="c4b08279-fe00-4688-8202-88df5280da09" containerID="03e220229327859848eae5522da89e0b482c8c71c227ace764ab1e948c9d3dc1" exitCode=0 Jan 28 13:19:22 crc kubenswrapper[4848]: I0128 13:19:22.978047 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" event={"ID":"c4b08279-fe00-4688-8202-88df5280da09","Type":"ContainerDied","Data":"03e220229327859848eae5522da89e0b482c8c71c227ace764ab1e948c9d3dc1"} Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.510942 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.701801 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r792\" (UniqueName: \"kubernetes.io/projected/c4b08279-fe00-4688-8202-88df5280da09-kube-api-access-5r792\") pod \"c4b08279-fe00-4688-8202-88df5280da09\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.702039 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-inventory\") pod \"c4b08279-fe00-4688-8202-88df5280da09\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.702093 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-ssh-key-openstack-edpm-ipam\") pod \"c4b08279-fe00-4688-8202-88df5280da09\" (UID: \"c4b08279-fe00-4688-8202-88df5280da09\") " Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.710167 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4b08279-fe00-4688-8202-88df5280da09-kube-api-access-5r792" (OuterVolumeSpecName: "kube-api-access-5r792") pod "c4b08279-fe00-4688-8202-88df5280da09" (UID: "c4b08279-fe00-4688-8202-88df5280da09"). InnerVolumeSpecName "kube-api-access-5r792". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.739348 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-inventory" (OuterVolumeSpecName: "inventory") pod "c4b08279-fe00-4688-8202-88df5280da09" (UID: "c4b08279-fe00-4688-8202-88df5280da09"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.751889 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c4b08279-fe00-4688-8202-88df5280da09" (UID: "c4b08279-fe00-4688-8202-88df5280da09"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.806567 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r792\" (UniqueName: \"kubernetes.io/projected/c4b08279-fe00-4688-8202-88df5280da09-kube-api-access-5r792\") on node \"crc\" DevicePath \"\"" Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.806638 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:19:24 crc kubenswrapper[4848]: I0128 13:19:24.806659 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c4b08279-fe00-4688-8202-88df5280da09-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.000631 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" event={"ID":"c4b08279-fe00-4688-8202-88df5280da09","Type":"ContainerDied","Data":"ece1201d261bcb077eb176de801d1edd81883fba12fc6aab3519e0f6bc4f012a"} Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.000697 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ece1201d261bcb077eb176de801d1edd81883fba12fc6aab3519e0f6bc4f012a" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.000697 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-67bz6" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.111710 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb"] Jan 28 13:19:25 crc kubenswrapper[4848]: E0128 13:19:25.112529 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4b08279-fe00-4688-8202-88df5280da09" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.112551 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4b08279-fe00-4688-8202-88df5280da09" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.112743 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4b08279-fe00-4688-8202-88df5280da09" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.113595 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.121631 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.121744 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.121990 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.124184 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.149500 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb"] Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.217701 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vm8j\" (UniqueName: \"kubernetes.io/projected/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-kube-api-access-5vm8j\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.218603 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.218843 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.321697 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.321798 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.321901 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vm8j\" (UniqueName: \"kubernetes.io/projected/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-kube-api-access-5vm8j\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.328128 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.338236 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.343027 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vm8j\" (UniqueName: \"kubernetes.io/projected/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-kube-api-access-5vm8j\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zjdcb\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:25 crc kubenswrapper[4848]: I0128 13:19:25.442626 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:19:26 crc kubenswrapper[4848]: I0128 13:19:26.066013 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb"] Jan 28 13:19:27 crc kubenswrapper[4848]: I0128 13:19:27.021582 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" event={"ID":"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3","Type":"ContainerStarted","Data":"dd791be3ca81d840815522d23bd60c595240bad686616c25d6b83828755c7a9b"} Jan 28 13:19:28 crc kubenswrapper[4848]: I0128 13:19:28.034058 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" event={"ID":"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3","Type":"ContainerStarted","Data":"f6ddcf5742dbaf00f58cbdf91fdc0b85d687dbfe23f05d4ec34c71609c346a7e"} Jan 28 13:19:28 crc kubenswrapper[4848]: I0128 13:19:28.057660 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" podStartSLOduration=2.300669775 podStartE2EDuration="3.057634705s" podCreationTimestamp="2026-01-28 13:19:25 +0000 UTC" firstStartedPulling="2026-01-28 13:19:26.07004886 +0000 UTC m=+1992.982265898" lastFinishedPulling="2026-01-28 13:19:26.827013789 +0000 UTC m=+1993.739230828" observedRunningTime="2026-01-28 13:19:28.053770748 +0000 UTC m=+1994.965987796" watchObservedRunningTime="2026-01-28 13:19:28.057634705 +0000 UTC m=+1994.969851743" Jan 28 13:19:34 crc kubenswrapper[4848]: I0128 13:19:34.858539 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:19:34 crc kubenswrapper[4848]: E0128 13:19:34.859995 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:19:45 crc kubenswrapper[4848]: I0128 13:19:45.851221 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:19:47 crc kubenswrapper[4848]: I0128 13:19:47.264398 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"07611487a31b7c45ba47ed64a959f661cead56f7ba8c4db44c7b948853391684"} Jan 28 13:20:04 crc kubenswrapper[4848]: I0128 13:20:04.687896 4848 scope.go:117] "RemoveContainer" containerID="f0824b49077eeec8e6cb0cf213b42661d575cca9ecdc679edee1dcbb91abb8bf" Jan 28 13:20:04 crc kubenswrapper[4848]: I0128 13:20:04.713008 4848 scope.go:117] "RemoveContainer" containerID="4c073da8e3337b45af4f84c2af9f68e9123607f84283718c7e3082044e5ac82b" Jan 28 13:20:04 crc kubenswrapper[4848]: I0128 13:20:04.766521 4848 scope.go:117] "RemoveContainer" containerID="b81354ae161679f02349312cf75a1c9e1a540d26882e99b5d96124bc5066fb9f" Jan 28 13:20:04 crc kubenswrapper[4848]: I0128 13:20:04.817408 4848 scope.go:117] "RemoveContainer" containerID="cc3ba5f023093e8d8f0907f78f6779698ce152fe455974dd128ebfc9b3f89bd5" Jan 28 13:20:04 crc kubenswrapper[4848]: I0128 13:20:04.874037 4848 scope.go:117] "RemoveContainer" containerID="1d08abb1260dd36dc5beab0a1a999d5ed404cf6ad84d269670b995997f3c1848" Jan 28 13:20:04 crc kubenswrapper[4848]: I0128 13:20:04.929373 4848 scope.go:117] "RemoveContainer" containerID="edf32bc74c1e8aafe6e88b62505f05ec787f1cbdd02deaf06a232c7d0927069f" Jan 28 13:20:06 crc kubenswrapper[4848]: I0128 13:20:06.485538 4848 generic.go:334] "Generic (PLEG): container finished" podID="63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" containerID="f6ddcf5742dbaf00f58cbdf91fdc0b85d687dbfe23f05d4ec34c71609c346a7e" exitCode=0 Jan 28 13:20:06 crc kubenswrapper[4848]: I0128 13:20:06.485586 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" event={"ID":"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3","Type":"ContainerDied","Data":"f6ddcf5742dbaf00f58cbdf91fdc0b85d687dbfe23f05d4ec34c71609c346a7e"} Jan 28 13:20:07 crc kubenswrapper[4848]: I0128 13:20:07.926422 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.044952 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-ssh-key-openstack-edpm-ipam\") pod \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.045211 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-inventory\") pod \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.045405 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vm8j\" (UniqueName: \"kubernetes.io/projected/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-kube-api-access-5vm8j\") pod \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\" (UID: \"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3\") " Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.053085 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-kube-api-access-5vm8j" (OuterVolumeSpecName: "kube-api-access-5vm8j") pod "63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" (UID: "63a4d58d-3a42-4ddc-b735-af5e71c2ffd3"). InnerVolumeSpecName "kube-api-access-5vm8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.076103 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" (UID: "63a4d58d-3a42-4ddc-b735-af5e71c2ffd3"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.079204 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-inventory" (OuterVolumeSpecName: "inventory") pod "63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" (UID: "63a4d58d-3a42-4ddc-b735-af5e71c2ffd3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.147466 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.147500 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vm8j\" (UniqueName: \"kubernetes.io/projected/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-kube-api-access-5vm8j\") on node \"crc\" DevicePath \"\"" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.147511 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63a4d58d-3a42-4ddc-b735-af5e71c2ffd3-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.510540 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" event={"ID":"63a4d58d-3a42-4ddc-b735-af5e71c2ffd3","Type":"ContainerDied","Data":"dd791be3ca81d840815522d23bd60c595240bad686616c25d6b83828755c7a9b"} Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.510613 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd791be3ca81d840815522d23bd60c595240bad686616c25d6b83828755c7a9b" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.510635 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zjdcb" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.613194 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c"] Jan 28 13:20:08 crc kubenswrapper[4848]: E0128 13:20:08.614114 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.614140 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.614444 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a4d58d-3a42-4ddc-b735-af5e71c2ffd3" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.615443 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.619217 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.619551 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.619684 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.619793 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.624304 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c"] Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.658801 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.658892 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.659084 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g4x4\" (UniqueName: \"kubernetes.io/projected/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-kube-api-access-9g4x4\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.761911 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.762022 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.762449 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g4x4\" (UniqueName: 
\"kubernetes.io/projected/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-kube-api-access-9g4x4\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.766317 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.769020 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.785113 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g4x4\" (UniqueName: \"kubernetes.io/projected/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-kube-api-access-9g4x4\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:08 crc kubenswrapper[4848]: I0128 13:20:08.936138 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:20:09 crc kubenswrapper[4848]: I0128 13:20:09.531792 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c"] Jan 28 13:20:10 crc kubenswrapper[4848]: I0128 13:20:10.538132 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" event={"ID":"dee48f15-f76a-4039-b7a1-85c61a4d2ed3","Type":"ContainerStarted","Data":"b5470728f43d2c5c97ea9844fd4a7defcccabfcc5fc9e362fbb0547526cc1d86"} Jan 28 13:20:10 crc kubenswrapper[4848]: I0128 13:20:10.538609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" event={"ID":"dee48f15-f76a-4039-b7a1-85c61a4d2ed3","Type":"ContainerStarted","Data":"7d8b2dc54cff4a6724943d74498fb43c8c1f1a4307047c5e8d2f46fe4c056c97"} Jan 28 13:20:10 crc kubenswrapper[4848]: I0128 13:20:10.565013 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" podStartSLOduration=2.152843732 podStartE2EDuration="2.564985453s" podCreationTimestamp="2026-01-28 13:20:08 +0000 UTC" firstStartedPulling="2026-01-28 13:20:09.541955411 +0000 UTC m=+2036.454172449" lastFinishedPulling="2026-01-28 13:20:09.954097132 +0000 UTC m=+2036.866314170" observedRunningTime="2026-01-28 13:20:10.555070293 +0000 UTC m=+2037.467287331" watchObservedRunningTime="2026-01-28 13:20:10.564985453 +0000 UTC m=+2037.477202491" Jan 28 13:20:15 crc kubenswrapper[4848]: I0128 13:20:15.062152 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8rd4v"] Jan 28 13:20:15 crc kubenswrapper[4848]: I0128 
13:20:15.073521 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8rd4v"] Jan 28 13:20:16 crc kubenswrapper[4848]: I0128 13:20:16.864808 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1ace158-4e32-4a9a-b350-4afddceb574c" path="/var/lib/kubelet/pods/c1ace158-4e32-4a9a-b350-4afddceb574c/volumes" Jan 28 13:20:43 crc kubenswrapper[4848]: I0128 13:20:43.053324 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-xqjf4"] Jan 28 13:20:43 crc kubenswrapper[4848]: I0128 13:20:43.063357 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-xqjf4"] Jan 28 13:20:44 crc kubenswrapper[4848]: I0128 13:20:44.867398 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77d21746-7f11-4c88-9433-8672991fe2e3" path="/var/lib/kubelet/pods/77d21746-7f11-4c88-9433-8672991fe2e3/volumes" Jan 28 13:20:54 crc kubenswrapper[4848]: I0128 13:20:54.044159 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bglkq"] Jan 28 13:20:54 crc kubenswrapper[4848]: I0128 13:20:54.052763 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bglkq"] Jan 28 13:20:54 crc kubenswrapper[4848]: I0128 13:20:54.865428 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba850830-ca3a-43c2-8639-bdf9386d7f9b" path="/var/lib/kubelet/pods/ba850830-ca3a-43c2-8639-bdf9386d7f9b/volumes" Jan 28 13:21:01 crc kubenswrapper[4848]: I0128 13:21:01.144364 4848 generic.go:334] "Generic (PLEG): container finished" podID="dee48f15-f76a-4039-b7a1-85c61a4d2ed3" containerID="b5470728f43d2c5c97ea9844fd4a7defcccabfcc5fc9e362fbb0547526cc1d86" exitCode=0 Jan 28 13:21:01 crc kubenswrapper[4848]: I0128 13:21:01.144472 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" event={"ID":"dee48f15-f76a-4039-b7a1-85c61a4d2ed3","Type":"ContainerDied","Data":"b5470728f43d2c5c97ea9844fd4a7defcccabfcc5fc9e362fbb0547526cc1d86"} Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.637295 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.702792 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g4x4\" (UniqueName: \"kubernetes.io/projected/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-kube-api-access-9g4x4\") pod \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.702855 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-inventory\") pod \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.703057 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-ssh-key-openstack-edpm-ipam\") pod \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\" (UID: \"dee48f15-f76a-4039-b7a1-85c61a4d2ed3\") " Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.718562 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-kube-api-access-9g4x4" (OuterVolumeSpecName: "kube-api-access-9g4x4") pod "dee48f15-f76a-4039-b7a1-85c61a4d2ed3" (UID: "dee48f15-f76a-4039-b7a1-85c61a4d2ed3"). InnerVolumeSpecName "kube-api-access-9g4x4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.744379 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-inventory" (OuterVolumeSpecName: "inventory") pod "dee48f15-f76a-4039-b7a1-85c61a4d2ed3" (UID: "dee48f15-f76a-4039-b7a1-85c61a4d2ed3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.751834 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "dee48f15-f76a-4039-b7a1-85c61a4d2ed3" (UID: "dee48f15-f76a-4039-b7a1-85c61a4d2ed3"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.806747 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g4x4\" (UniqueName: \"kubernetes.io/projected/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-kube-api-access-9g4x4\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.806790 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:02 crc kubenswrapper[4848]: I0128 13:21:02.806805 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dee48f15-f76a-4039-b7a1-85c61a4d2ed3-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.165432 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" event={"ID":"dee48f15-f76a-4039-b7a1-85c61a4d2ed3","Type":"ContainerDied","Data":"7d8b2dc54cff4a6724943d74498fb43c8c1f1a4307047c5e8d2f46fe4c056c97"} Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.165495 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d8b2dc54cff4a6724943d74498fb43c8c1f1a4307047c5e8d2f46fe4c056c97" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.166006 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.275117 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rzhsb"] Jan 28 13:21:03 crc kubenswrapper[4848]: E0128 13:21:03.276038 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee48f15-f76a-4039-b7a1-85c61a4d2ed3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.276071 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee48f15-f76a-4039-b7a1-85c61a4d2ed3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.276389 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="dee48f15-f76a-4039-b7a1-85c61a4d2ed3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.277483 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.280491 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.280655 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.281482 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.283376 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.296790 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rzhsb"] Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.330664 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfb5c\" (UniqueName: \"kubernetes.io/projected/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-kube-api-access-pfb5c\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.330720 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.330961 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.433405 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfb5c\" (UniqueName: \"kubernetes.io/projected/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-kube-api-access-pfb5c\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.433824 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.433997 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc 
kubenswrapper[4848]: I0128 13:21:03.438603 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.445350 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.455878 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfb5c\" (UniqueName: \"kubernetes.io/projected/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-kube-api-access-pfb5c\") pod \"ssh-known-hosts-edpm-deployment-rzhsb\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:03 crc kubenswrapper[4848]: I0128 13:21:03.595862 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:04 crc kubenswrapper[4848]: I0128 13:21:04.190020 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rzhsb"] Jan 28 13:21:05 crc kubenswrapper[4848]: I0128 13:21:05.094030 4848 scope.go:117] "RemoveContainer" containerID="c23039557b3d2d5dddf6c6ed44be31cfe71c0d206b50bbab5afa2fda6fee2408" Jan 28 13:21:05 crc kubenswrapper[4848]: I0128 13:21:05.154062 4848 scope.go:117] "RemoveContainer" containerID="ed772e9f95c32c4ce555ea9cb08e7cad878f5377ffb4f74863f0cf0ea90c01d2" Jan 28 13:21:05 crc kubenswrapper[4848]: I0128 13:21:05.201575 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" event={"ID":"548fac9b-bd05-42b8-8c88-7c9de08ae4b2","Type":"ContainerStarted","Data":"a9e360ebfe24b16e8956580a5ffa155ce04a79d0fdda7f4ea35a875721ba9b12"} Jan 28 13:21:05 crc kubenswrapper[4848]: I0128 13:21:05.227337 4848 scope.go:117] "RemoveContainer" containerID="7ac8c112211d7b6ad12d0f8daf1a5d6983909337df04ebfa23aafb9bb60e5ed1" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.215655 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" event={"ID":"548fac9b-bd05-42b8-8c88-7c9de08ae4b2","Type":"ContainerStarted","Data":"fa77969a99285f3ee1ed33e552db0c6cdede12baa4b42846bbc789e83f0a8c43"} Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.241155 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" podStartSLOduration=2.548678557 podStartE2EDuration="3.24112925s" podCreationTimestamp="2026-01-28 13:21:03 +0000 UTC" firstStartedPulling="2026-01-28 13:21:04.195832076 +0000 UTC m=+2091.108049114" lastFinishedPulling="2026-01-28 13:21:04.888282769 +0000 UTC m=+2091.800499807" observedRunningTime="2026-01-28 13:21:06.237948653 +0000 UTC m=+2093.150165691" watchObservedRunningTime="2026-01-28 13:21:06.24112925 +0000 UTC m=+2093.153346288" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.420310 4848 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-79twb"] Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.423489 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.448293 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-79twb"] Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.515609 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-utilities\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.515831 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps492\" (UniqueName: \"kubernetes.io/projected/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-kube-api-access-ps492\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.515968 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-catalog-content\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.618159 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-utilities\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.618330 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps492\" (UniqueName: \"kubernetes.io/projected/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-kube-api-access-ps492\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.618423 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-catalog-content\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.619052 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-utilities\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.619092 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-catalog-content\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " 
pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.650576 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps492\" (UniqueName: \"kubernetes.io/projected/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-kube-api-access-ps492\") pod \"redhat-operators-79twb\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:06 crc kubenswrapper[4848]: I0128 13:21:06.761378 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:07 crc kubenswrapper[4848]: I0128 13:21:07.331942 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-79twb"] Jan 28 13:21:07 crc kubenswrapper[4848]: W0128 13:21:07.337655 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fb9fa03_c8a0_42c8_83c9_c73bd403d4a3.slice/crio-620c843580e1becc2e75238cfb53ebfbdc14701056e4ba78ce3820648c882a92 WatchSource:0}: Error finding container 620c843580e1becc2e75238cfb53ebfbdc14701056e4ba78ce3820648c882a92: Status 404 returned error can't find the container with id 620c843580e1becc2e75238cfb53ebfbdc14701056e4ba78ce3820648c882a92 Jan 28 13:21:08 crc kubenswrapper[4848]: I0128 13:21:08.236539 4848 generic.go:334] "Generic (PLEG): container finished" podID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerID="51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5" exitCode=0 Jan 28 13:21:08 crc kubenswrapper[4848]: I0128 13:21:08.236629 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerDied","Data":"51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5"} Jan 28 13:21:08 crc kubenswrapper[4848]: I0128 13:21:08.237425 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerStarted","Data":"620c843580e1becc2e75238cfb53ebfbdc14701056e4ba78ce3820648c882a92"} Jan 28 13:21:10 crc kubenswrapper[4848]: I0128 13:21:10.262683 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerStarted","Data":"5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392"} Jan 28 13:21:13 crc kubenswrapper[4848]: I0128 13:21:13.307019 4848 generic.go:334] "Generic (PLEG): container finished" podID="548fac9b-bd05-42b8-8c88-7c9de08ae4b2" containerID="fa77969a99285f3ee1ed33e552db0c6cdede12baa4b42846bbc789e83f0a8c43" exitCode=0 Jan 28 13:21:13 crc kubenswrapper[4848]: I0128 13:21:13.307121 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" event={"ID":"548fac9b-bd05-42b8-8c88-7c9de08ae4b2","Type":"ContainerDied","Data":"fa77969a99285f3ee1ed33e552db0c6cdede12baa4b42846bbc789e83f0a8c43"} Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.797303 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.848358 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-ssh-key-openstack-edpm-ipam\") pod \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.848513 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfb5c\" (UniqueName: \"kubernetes.io/projected/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-kube-api-access-pfb5c\") pod \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.848584 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-inventory-0\") pod \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\" (UID: \"548fac9b-bd05-42b8-8c88-7c9de08ae4b2\") " Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.878066 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-kube-api-access-pfb5c" (OuterVolumeSpecName: "kube-api-access-pfb5c") pod "548fac9b-bd05-42b8-8c88-7c9de08ae4b2" (UID: "548fac9b-bd05-42b8-8c88-7c9de08ae4b2"). InnerVolumeSpecName "kube-api-access-pfb5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.886630 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "548fac9b-bd05-42b8-8c88-7c9de08ae4b2" (UID: "548fac9b-bd05-42b8-8c88-7c9de08ae4b2"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.896725 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "548fac9b-bd05-42b8-8c88-7c9de08ae4b2" (UID: "548fac9b-bd05-42b8-8c88-7c9de08ae4b2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.952110 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.952164 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfb5c\" (UniqueName: \"kubernetes.io/projected/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-kube-api-access-pfb5c\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:14 crc kubenswrapper[4848]: I0128 13:21:14.952178 4848 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/548fac9b-bd05-42b8-8c88-7c9de08ae4b2-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.330507 4848 generic.go:334] "Generic (PLEG): container finished" podID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerID="5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392" exitCode=0 Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.330617 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerDied","Data":"5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392"} Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.340299 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" event={"ID":"548fac9b-bd05-42b8-8c88-7c9de08ae4b2","Type":"ContainerDied","Data":"a9e360ebfe24b16e8956580a5ffa155ce04a79d0fdda7f4ea35a875721ba9b12"} Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.340354 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9e360ebfe24b16e8956580a5ffa155ce04a79d0fdda7f4ea35a875721ba9b12" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.340381 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rzhsb" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.430129 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9"] Jan 28 13:21:15 crc kubenswrapper[4848]: E0128 13:21:15.430847 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="548fac9b-bd05-42b8-8c88-7c9de08ae4b2" containerName="ssh-known-hosts-edpm-deployment" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.430885 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="548fac9b-bd05-42b8-8c88-7c9de08ae4b2" containerName="ssh-known-hosts-edpm-deployment" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.431201 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="548fac9b-bd05-42b8-8c88-7c9de08ae4b2" containerName="ssh-known-hosts-edpm-deployment" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.432307 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.436682 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.436917 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.437200 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.437355 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.443026 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9"] Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.472196 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx7xl\" (UniqueName: \"kubernetes.io/projected/e8f81366-a592-4a64-b4e7-7d036d232b6b-kube-api-access-qx7xl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.472406 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.472547 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.575200 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx7xl\" (UniqueName: \"kubernetes.io/projected/e8f81366-a592-4a64-b4e7-7d036d232b6b-kube-api-access-qx7xl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.575339 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.575445 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-inventory\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.583684 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.588130 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.599717 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx7xl\" (UniqueName: \"kubernetes.io/projected/e8f81366-a592-4a64-b4e7-7d036d232b6b-kube-api-access-qx7xl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ntmv9\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:15 crc kubenswrapper[4848]: I0128 13:21:15.758663 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:16 crc kubenswrapper[4848]: I0128 13:21:16.338978 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9"] Jan 28 13:21:16 crc kubenswrapper[4848]: W0128 13:21:16.348539 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8f81366_a592_4a64_b4e7_7d036d232b6b.slice/crio-b87e73fe5a3c1d948fbd89d354503a82fe916aac19e83265a386bacb5e06b144 WatchSource:0}: Error finding container b87e73fe5a3c1d948fbd89d354503a82fe916aac19e83265a386bacb5e06b144: Status 404 returned error can't find the container with id b87e73fe5a3c1d948fbd89d354503a82fe916aac19e83265a386bacb5e06b144 Jan 28 13:21:16 crc kubenswrapper[4848]: I0128 13:21:16.362606 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerStarted","Data":"c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c"} Jan 28 13:21:16 crc kubenswrapper[4848]: I0128 13:21:16.393811 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-79twb" podStartSLOduration=2.853915697 podStartE2EDuration="10.393786288s" podCreationTimestamp="2026-01-28 13:21:06 +0000 UTC" firstStartedPulling="2026-01-28 13:21:08.238491361 +0000 UTC m=+2095.150708399" lastFinishedPulling="2026-01-28 13:21:15.778361962 +0000 UTC m=+2102.690578990" observedRunningTime="2026-01-28 13:21:16.3876521 +0000 UTC m=+2103.299869138" watchObservedRunningTime="2026-01-28 13:21:16.393786288 +0000 UTC m=+2103.306003326" Jan 28 13:21:16 crc kubenswrapper[4848]: I0128 13:21:16.968707 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:16 crc 
kubenswrapper[4848]: I0128 13:21:16.987604 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-79twb"
Jan 28 13:21:17 crc kubenswrapper[4848]: I0128 13:21:17.378355 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" event={"ID":"e8f81366-a592-4a64-b4e7-7d036d232b6b","Type":"ContainerStarted","Data":"d10cf00720486eae4641b6262acdf4d6cb7a3d2c2a5eaf25b2ad433d36bff399"}
Jan 28 13:21:17 crc kubenswrapper[4848]: I0128 13:21:17.378933 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" event={"ID":"e8f81366-a592-4a64-b4e7-7d036d232b6b","Type":"ContainerStarted","Data":"b87e73fe5a3c1d948fbd89d354503a82fe916aac19e83265a386bacb5e06b144"}
Jan 28 13:21:17 crc kubenswrapper[4848]: I0128 13:21:17.415244 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" podStartSLOduration=1.771363282 podStartE2EDuration="2.415220696s" podCreationTimestamp="2026-01-28 13:21:15 +0000 UTC" firstStartedPulling="2026-01-28 13:21:16.358234335 +0000 UTC m=+2103.270451383" lastFinishedPulling="2026-01-28 13:21:17.002091749 +0000 UTC m=+2103.914308797" observedRunningTime="2026-01-28 13:21:17.41096628 +0000 UTC m=+2104.323183328" watchObservedRunningTime="2026-01-28 13:21:17.415220696 +0000 UTC m=+2104.327437734"
Jan 28 13:21:18 crc kubenswrapper[4848]: I0128 13:21:18.038855 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-79twb" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="registry-server" probeResult="failure" output=<
Jan 28 13:21:18 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s
Jan 28 13:21:18 crc kubenswrapper[4848]: >
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.058543 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-vzjwq"]
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.077291 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-vzjwq"]
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.500220 4848 generic.go:334] "Generic (PLEG): container finished" podID="e8f81366-a592-4a64-b4e7-7d036d232b6b" containerID="d10cf00720486eae4641b6262acdf4d6cb7a3d2c2a5eaf25b2ad433d36bff399" exitCode=0
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.500308 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" event={"ID":"e8f81366-a592-4a64-b4e7-7d036d232b6b","Type":"ContainerDied","Data":"d10cf00720486eae4641b6262acdf4d6cb7a3d2c2a5eaf25b2ad433d36bff399"}
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.815138 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-79twb"
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.905762 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de8237ad-c0ce-4a60-a455-1ff2c36ad531" path="/var/lib/kubelet/pods/de8237ad-c0ce-4a60-a455-1ff2c36ad531/volumes"
Jan 28 13:21:26 crc kubenswrapper[4848]: I0128 13:21:26.907232 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-79twb"
Jan 28 13:21:27 crc kubenswrapper[4848]: I0128 13:21:27.071932 4848 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/redhat-operators-79twb"] Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.073828 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.208070 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qx7xl\" (UniqueName: \"kubernetes.io/projected/e8f81366-a592-4a64-b4e7-7d036d232b6b-kube-api-access-qx7xl\") pod \"e8f81366-a592-4a64-b4e7-7d036d232b6b\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.208270 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-inventory\") pod \"e8f81366-a592-4a64-b4e7-7d036d232b6b\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.208464 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-ssh-key-openstack-edpm-ipam\") pod \"e8f81366-a592-4a64-b4e7-7d036d232b6b\" (UID: \"e8f81366-a592-4a64-b4e7-7d036d232b6b\") " Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.216707 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8f81366-a592-4a64-b4e7-7d036d232b6b-kube-api-access-qx7xl" (OuterVolumeSpecName: "kube-api-access-qx7xl") pod "e8f81366-a592-4a64-b4e7-7d036d232b6b" (UID: "e8f81366-a592-4a64-b4e7-7d036d232b6b"). InnerVolumeSpecName "kube-api-access-qx7xl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.240079 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e8f81366-a592-4a64-b4e7-7d036d232b6b" (UID: "e8f81366-a592-4a64-b4e7-7d036d232b6b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.242567 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-inventory" (OuterVolumeSpecName: "inventory") pod "e8f81366-a592-4a64-b4e7-7d036d232b6b" (UID: "e8f81366-a592-4a64-b4e7-7d036d232b6b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.311501 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.311563 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qx7xl\" (UniqueName: \"kubernetes.io/projected/e8f81366-a592-4a64-b4e7-7d036d232b6b-kube-api-access-qx7xl\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.311574 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8f81366-a592-4a64-b4e7-7d036d232b6b-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.525672 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" event={"ID":"e8f81366-a592-4a64-b4e7-7d036d232b6b","Type":"ContainerDied","Data":"b87e73fe5a3c1d948fbd89d354503a82fe916aac19e83265a386bacb5e06b144"} Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.525716 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ntmv9" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.525751 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b87e73fe5a3c1d948fbd89d354503a82fe916aac19e83265a386bacb5e06b144" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.526733 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-79twb" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="registry-server" containerID="cri-o://c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c" gracePeriod=2 Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.649403 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm"] Jan 28 13:21:28 crc kubenswrapper[4848]: E0128 13:21:28.650914 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8f81366-a592-4a64-b4e7-7d036d232b6b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.650932 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8f81366-a592-4a64-b4e7-7d036d232b6b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.651163 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8f81366-a592-4a64-b4e7-7d036d232b6b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.652106 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.659879 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.660221 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.660023 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.660675 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.691265 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm"] Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.823672 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.823881 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.823944 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-kube-api-access-rcr4k\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.926639 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.926735 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-kube-api-access-rcr4k\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.926810 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-inventory\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.932869 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.933052 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:28 crc kubenswrapper[4848]: I0128 13:21:28.945667 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-kube-api-access-rcr4k\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.049548 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.064847 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.233605 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-utilities\") pod \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.235152 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-utilities" (OuterVolumeSpecName: "utilities") pod "4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" (UID: "4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.247022 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-catalog-content\") pod \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.247141 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps492\" (UniqueName: \"kubernetes.io/projected/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-kube-api-access-ps492\") pod \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\" (UID: \"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3\") " Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.248143 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.255549 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-kube-api-access-ps492" (OuterVolumeSpecName: "kube-api-access-ps492") pod "4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" (UID: "4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3"). InnerVolumeSpecName "kube-api-access-ps492". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.349340 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps492\" (UniqueName: \"kubernetes.io/projected/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-kube-api-access-ps492\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.387989 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" (UID: "4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.450802 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.562870 4848 generic.go:334] "Generic (PLEG): container finished" podID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerID="c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c" exitCode=0 Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.563397 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerDied","Data":"c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c"} Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.563450 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-79twb" event={"ID":"4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3","Type":"ContainerDied","Data":"620c843580e1becc2e75238cfb53ebfbdc14701056e4ba78ce3820648c882a92"} Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.563496 4848 scope.go:117] "RemoveContainer" containerID="c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.563752 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-79twb" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.604938 4848 scope.go:117] "RemoveContainer" containerID="5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.624366 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-79twb"] Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.634988 4848 scope.go:117] "RemoveContainer" containerID="51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.640491 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-79twb"] Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.655286 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm"] Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.662034 4848 scope.go:117] "RemoveContainer" containerID="c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.662637 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:21:29 crc kubenswrapper[4848]: E0128 13:21:29.663267 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c\": container with ID starting with c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c not found: ID does not exist" containerID="c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.663373 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c"} err="failed to get container status 
\"c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c\": rpc error: code = NotFound desc = could not find container \"c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c\": container with ID starting with c4001ac9a7ed02c4ce16b72156c2bedbe4eae485d36b871ec660a03c7bbb225c not found: ID does not exist" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.663460 4848 scope.go:117] "RemoveContainer" containerID="5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392" Jan 28 13:21:29 crc kubenswrapper[4848]: E0128 13:21:29.663883 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392\": container with ID starting with 5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392 not found: ID does not exist" containerID="5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.663962 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392"} err="failed to get container status \"5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392\": rpc error: code = NotFound desc = could not find container \"5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392\": container with ID starting with 5ccd1ed6a1446db343ada94844d2c25bebb05c010038f5b00126e5d83c5c4392 not found: ID does not exist" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.664016 4848 scope.go:117] "RemoveContainer" containerID="51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5" Jan 28 13:21:29 crc kubenswrapper[4848]: E0128 13:21:29.665072 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5\": container with ID starting with 51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5 not found: ID does not exist" containerID="51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5" Jan 28 13:21:29 crc kubenswrapper[4848]: I0128 13:21:29.665113 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5"} err="failed to get container status \"51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5\": rpc error: code = NotFound desc = could not find container \"51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5\": container with ID starting with 51ee1548b9d685fef7b25e3f89ca0dc59d5eb70f467270fceb3e121874b67dc5 not found: ID does not exist" Jan 28 13:21:30 crc kubenswrapper[4848]: I0128 13:21:30.577194 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" event={"ID":"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898","Type":"ContainerStarted","Data":"5e745731eacb83335ef7cdc647284d88879d2f54c23f3f16f76bdd5732e36f28"} Jan 28 13:21:30 crc kubenswrapper[4848]: I0128 13:21:30.870106 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" path="/var/lib/kubelet/pods/4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3/volumes" Jan 28 13:21:31 crc kubenswrapper[4848]: I0128 13:21:31.593200 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" event={"ID":"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898","Type":"ContainerStarted","Data":"16c6200bab0c11470daa238f0e7eed77d7341bea3d8121b6ce890141a2b2bb7d"} Jan 28 13:21:31 crc kubenswrapper[4848]: I0128 13:21:31.628410 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" podStartSLOduration=2.559679095 podStartE2EDuration="3.624702347s" podCreationTimestamp="2026-01-28 13:21:28 +0000 UTC" firstStartedPulling="2026-01-28 13:21:29.662375694 +0000 UTC m=+2116.574592732" lastFinishedPulling="2026-01-28 13:21:30.727398946 +0000 UTC m=+2117.639615984" observedRunningTime="2026-01-28 13:21:31.61312523 +0000 UTC m=+2118.525342278" watchObservedRunningTime="2026-01-28 13:21:31.624702347 +0000 UTC m=+2118.536919385" Jan 28 13:21:41 crc kubenswrapper[4848]: I0128 13:21:41.712508 4848 generic.go:334] "Generic (PLEG): container finished" podID="0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" containerID="16c6200bab0c11470daa238f0e7eed77d7341bea3d8121b6ce890141a2b2bb7d" exitCode=0 Jan 28 13:21:41 crc kubenswrapper[4848]: I0128 13:21:41.712601 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" event={"ID":"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898","Type":"ContainerDied","Data":"16c6200bab0c11470daa238f0e7eed77d7341bea3d8121b6ce890141a2b2bb7d"} Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.266195 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.338027 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-inventory\") pod \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.338178 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-ssh-key-openstack-edpm-ipam\") pod \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.338216 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-kube-api-access-rcr4k\") pod \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\" (UID: \"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898\") " Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.345763 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-kube-api-access-rcr4k" (OuterVolumeSpecName: "kube-api-access-rcr4k") pod "0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" (UID: "0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898"). InnerVolumeSpecName "kube-api-access-rcr4k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.379556 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-inventory" (OuterVolumeSpecName: "inventory") pod "0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" (UID: "0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.384129 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" (UID: "0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.441678 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.441731 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.441743 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898-kube-api-access-rcr4k\") on node \"crc\" DevicePath \"\"" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.735453 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" event={"ID":"0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898","Type":"ContainerDied","Data":"5e745731eacb83335ef7cdc647284d88879d2f54c23f3f16f76bdd5732e36f28"} Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.735490 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.735512 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e745731eacb83335ef7cdc647284d88879d2f54c23f3f16f76bdd5732e36f28" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.876447 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr"] Jan 28 13:21:43 crc kubenswrapper[4848]: E0128 13:21:43.876963 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.876982 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:43 crc kubenswrapper[4848]: E0128 13:21:43.877006 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="extract-utilities" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.877013 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="extract-utilities" Jan 28 13:21:43 crc kubenswrapper[4848]: E0128 13:21:43.877024 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="registry-server" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.877030 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="registry-server" Jan 28 13:21:43 crc kubenswrapper[4848]: E0128 13:21:43.877039 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="extract-content" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.877045 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="extract-content" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.877270 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb9fa03-c8a0-42c8-83c9-c73bd403d4a3" containerName="registry-server" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.877301 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.878108 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.881595 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.881913 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.882172 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.882770 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.882805 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.887434 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.887557 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.899291 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.903421 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr"] Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953550 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953626 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953665 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953704 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953755 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953784 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953808 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953827 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953857 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953880 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbt7b\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-kube-api-access-sbt7b\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953912 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.953963 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.954011 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:43 crc kubenswrapper[4848]: I0128 13:21:43.954030 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056313 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056434 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056475 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056504 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056525 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056556 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056578 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbt7b\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-kube-api-access-sbt7b\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056619 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056664 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056707 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056727 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056759 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-libvirt-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056812 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.056837 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.064855 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.074180 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.074463 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.074577 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.075654 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.075714 
4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.075864 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.076773 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.076900 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.077115 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.078020 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.094277 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.094441 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-inventory\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.094562 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbt7b\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-kube-api-access-sbt7b\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.198460 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.700243 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr"] Jan 28 13:21:44 crc kubenswrapper[4848]: I0128 13:21:44.748363 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" event={"ID":"4acf7592-041f-43a4-b85b-a2fac8dbdc3c","Type":"ContainerStarted","Data":"63e53aa5591fba516aea0475a8bf17f97066b1b76110e7ba7f4c060ed9f24e0a"} Jan 28 13:21:46 crc kubenswrapper[4848]: I0128 13:21:46.771067 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" event={"ID":"4acf7592-041f-43a4-b85b-a2fac8dbdc3c","Type":"ContainerStarted","Data":"3ac7a906a6da7ad302f56707f19163943d2cf52e49727976082867e9e3e04ac1"} Jan 28 13:21:46 crc kubenswrapper[4848]: I0128 13:21:46.800564 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" podStartSLOduration=2.692338676 podStartE2EDuration="3.800540849s" podCreationTimestamp="2026-01-28 13:21:43 +0000 UTC" firstStartedPulling="2026-01-28 13:21:44.706465501 +0000 UTC m=+2131.618682529" lastFinishedPulling="2026-01-28 13:21:45.814667654 +0000 UTC m=+2132.726884702" observedRunningTime="2026-01-28 13:21:46.79471934 +0000 UTC m=+2133.706936598" watchObservedRunningTime="2026-01-28 13:21:46.800540849 +0000 UTC m=+2133.712757887" Jan 28 13:22:05 crc kubenswrapper[4848]: I0128 13:22:05.345797 4848 scope.go:117] "RemoveContainer" containerID="a2731cfa4f10d9756bd019e9f52666bfec9448bbc9b6769779d475d3784ca8af" Jan 28 13:22:07 crc kubenswrapper[4848]: I0128 13:22:07.924773 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:22:07 crc kubenswrapper[4848]: I0128 13:22:07.925201 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:22:25 crc kubenswrapper[4848]: I0128 13:22:25.217741 4848 generic.go:334] "Generic (PLEG): container finished" podID="4acf7592-041f-43a4-b85b-a2fac8dbdc3c" containerID="3ac7a906a6da7ad302f56707f19163943d2cf52e49727976082867e9e3e04ac1" exitCode=0 Jan 
28 13:22:25 crc kubenswrapper[4848]: I0128 13:22:25.217800 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" event={"ID":"4acf7592-041f-43a4-b85b-a2fac8dbdc3c","Type":"ContainerDied","Data":"3ac7a906a6da7ad302f56707f19163943d2cf52e49727976082867e9e3e04ac1"} Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.837206 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.971726 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ovn-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.971776 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-nova-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.971813 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.971843 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-telemetry-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.971926 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-bootstrap-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.971981 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972035 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ssh-key-openstack-edpm-ipam\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972060 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972078 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-inventory\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972223 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbt7b\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-kube-api-access-sbt7b\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972313 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972336 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-libvirt-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972394 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-neutron-metadata-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.972424 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-repo-setup-combined-ca-bundle\") pod \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\" (UID: \"4acf7592-041f-43a4-b85b-a2fac8dbdc3c\") " Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.979497 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.979739 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.982100 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.983614 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-kube-api-access-sbt7b" (OuterVolumeSpecName: "kube-api-access-sbt7b") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "kube-api-access-sbt7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.983689 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.988790 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.988780 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.989158 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.992654 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.992672 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.996407 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:26 crc kubenswrapper[4848]: I0128 13:22:26.996513 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.021571 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-inventory" (OuterVolumeSpecName: "inventory") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.030051 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4acf7592-041f-43a4-b85b-a2fac8dbdc3c" (UID: "4acf7592-041f-43a4-b85b-a2fac8dbdc3c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.076912 4848 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.076948 4848 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.076983 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.076995 4848 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077008 4848 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077018 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077028 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077065 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077076 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077086 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbt7b\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-kube-api-access-sbt7b\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077100 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077133 4848 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077158 4848 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.077169 4848 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4acf7592-041f-43a4-b85b-a2fac8dbdc3c-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.258450 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" event={"ID":"4acf7592-041f-43a4-b85b-a2fac8dbdc3c","Type":"ContainerDied","Data":"63e53aa5591fba516aea0475a8bf17f97066b1b76110e7ba7f4c060ed9f24e0a"} Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.258504 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63e53aa5591fba516aea0475a8bf17f97066b1b76110e7ba7f4c060ed9f24e0a" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.258549 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.380816 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f"] Jan 28 13:22:27 crc kubenswrapper[4848]: E0128 13:22:27.381351 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4acf7592-041f-43a4-b85b-a2fac8dbdc3c" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.381367 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4acf7592-041f-43a4-b85b-a2fac8dbdc3c" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.381587 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4acf7592-041f-43a4-b85b-a2fac8dbdc3c" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.382445 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.386543 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.386779 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.386916 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.389325 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.389408 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.394438 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f"] Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.491131 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.491230 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqbb4\" (UniqueName: \"kubernetes.io/projected/ebc674d6-8c77-4481-b022-c91d7c77ec6e-kube-api-access-xqbb4\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.491541 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.491590 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.491627 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.593959 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.594100 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqbb4\" (UniqueName: \"kubernetes.io/projected/ebc674d6-8c77-4481-b022-c91d7c77ec6e-kube-api-access-xqbb4\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.594221 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.594275 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.594317 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.596109 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.599950 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.600212 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.605212 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.617095 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqbb4\" (UniqueName: \"kubernetes.io/projected/ebc674d6-8c77-4481-b022-c91d7c77ec6e-kube-api-access-xqbb4\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-jv56f\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:27 crc kubenswrapper[4848]: I0128 13:22:27.700941 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:22:28 crc kubenswrapper[4848]: I0128 13:22:28.281460 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f"] Jan 28 13:22:29 crc kubenswrapper[4848]: I0128 13:22:29.292375 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" event={"ID":"ebc674d6-8c77-4481-b022-c91d7c77ec6e","Type":"ContainerStarted","Data":"77ce6e951e225a5d1ba4df808860df032523ee23a395cfdf27928050e53e02fc"} Jan 28 13:22:29 crc kubenswrapper[4848]: I0128 13:22:29.292869 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" event={"ID":"ebc674d6-8c77-4481-b022-c91d7c77ec6e","Type":"ContainerStarted","Data":"c9ebce0f2879adebe61c173d36066679c2af0c8ae25459df6588e88f95e3bd3a"} Jan 28 13:22:29 crc kubenswrapper[4848]: I0128 13:22:29.328343 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" podStartSLOduration=1.810251811 podStartE2EDuration="2.328320095s" podCreationTimestamp="2026-01-28 13:22:27 +0000 UTC" firstStartedPulling="2026-01-28 13:22:28.28972929 +0000 UTC m=+2175.201946328" lastFinishedPulling="2026-01-28 13:22:28.807797574 +0000 UTC m=+2175.720014612" observedRunningTime="2026-01-28 13:22:29.315164025 +0000 UTC m=+2176.227381073" watchObservedRunningTime="2026-01-28 13:22:29.328320095 +0000 UTC m=+2176.240537133" Jan 28 13:22:37 crc kubenswrapper[4848]: I0128 13:22:37.924466 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:22:37 crc kubenswrapper[4848]: I0128 13:22:37.925820 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:23:07 crc kubenswrapper[4848]: I0128 13:23:07.924524 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:23:07 crc kubenswrapper[4848]: I0128 13:23:07.925298 4848 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:23:07 crc kubenswrapper[4848]: I0128 13:23:07.925371 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:23:07 crc kubenswrapper[4848]: I0128 13:23:07.926408 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"07611487a31b7c45ba47ed64a959f661cead56f7ba8c4db44c7b948853391684"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:23:07 crc kubenswrapper[4848]: I0128 13:23:07.926490 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://07611487a31b7c45ba47ed64a959f661cead56f7ba8c4db44c7b948853391684" gracePeriod=600 Jan 28 13:23:08 crc kubenswrapper[4848]: I0128 13:23:08.727259 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="07611487a31b7c45ba47ed64a959f661cead56f7ba8c4db44c7b948853391684" exitCode=0 Jan 28 13:23:08 crc kubenswrapper[4848]: I0128 13:23:08.727284 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"07611487a31b7c45ba47ed64a959f661cead56f7ba8c4db44c7b948853391684"} Jan 28 13:23:08 crc kubenswrapper[4848]: I0128 13:23:08.727728 4848 scope.go:117] "RemoveContainer" containerID="5d90b94d7a7c13d738ab75f774d73b65d5a6b8be06dad4675f82292fee73d1e9" Jan 28 13:23:09 crc kubenswrapper[4848]: I0128 13:23:09.742150 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"} Jan 28 13:23:39 crc kubenswrapper[4848]: I0128 13:23:39.051633 4848 generic.go:334] "Generic (PLEG): container finished" podID="ebc674d6-8c77-4481-b022-c91d7c77ec6e" containerID="77ce6e951e225a5d1ba4df808860df032523ee23a395cfdf27928050e53e02fc" exitCode=0 Jan 28 13:23:39 crc kubenswrapper[4848]: I0128 13:23:39.052881 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" event={"ID":"ebc674d6-8c77-4481-b022-c91d7c77ec6e","Type":"ContainerDied","Data":"77ce6e951e225a5d1ba4df808860df032523ee23a395cfdf27928050e53e02fc"} Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.581011 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.767537 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ssh-key-openstack-edpm-ipam\") pod \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.767677 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-inventory\") pod \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.767872 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovn-combined-ca-bundle\") pod \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.767998 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqbb4\" (UniqueName: \"kubernetes.io/projected/ebc674d6-8c77-4481-b022-c91d7c77ec6e-kube-api-access-xqbb4\") pod \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.768071 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovncontroller-config-0\") pod \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\" (UID: \"ebc674d6-8c77-4481-b022-c91d7c77ec6e\") " Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.776996 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "ebc674d6-8c77-4481-b022-c91d7c77ec6e" (UID: "ebc674d6-8c77-4481-b022-c91d7c77ec6e"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.780700 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebc674d6-8c77-4481-b022-c91d7c77ec6e-kube-api-access-xqbb4" (OuterVolumeSpecName: "kube-api-access-xqbb4") pod "ebc674d6-8c77-4481-b022-c91d7c77ec6e" (UID: "ebc674d6-8c77-4481-b022-c91d7c77ec6e"). InnerVolumeSpecName "kube-api-access-xqbb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.805180 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ebc674d6-8c77-4481-b022-c91d7c77ec6e" (UID: "ebc674d6-8c77-4481-b022-c91d7c77ec6e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.815121 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "ebc674d6-8c77-4481-b022-c91d7c77ec6e" (UID: "ebc674d6-8c77-4481-b022-c91d7c77ec6e"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.828140 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-inventory" (OuterVolumeSpecName: "inventory") pod "ebc674d6-8c77-4481-b022-c91d7c77ec6e" (UID: "ebc674d6-8c77-4481-b022-c91d7c77ec6e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.871463 4848 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.871505 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqbb4\" (UniqueName: \"kubernetes.io/projected/ebc674d6-8c77-4481-b022-c91d7c77ec6e-kube-api-access-xqbb4\") on node \"crc\" DevicePath \"\"" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.871519 4848 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.871530 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:23:40 crc kubenswrapper[4848]: I0128 13:23:40.871542 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebc674d6-8c77-4481-b022-c91d7c77ec6e-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.077410 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" event={"ID":"ebc674d6-8c77-4481-b022-c91d7c77ec6e","Type":"ContainerDied","Data":"c9ebce0f2879adebe61c173d36066679c2af0c8ae25459df6588e88f95e3bd3a"} Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.077463 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9ebce0f2879adebe61c173d36066679c2af0c8ae25459df6588e88f95e3bd3a" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.077544 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-jv56f" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.198975 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p"] Jan 28 13:23:41 crc kubenswrapper[4848]: E0128 13:23:41.199495 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebc674d6-8c77-4481-b022-c91d7c77ec6e" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.199519 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebc674d6-8c77-4481-b022-c91d7c77ec6e" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.199732 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebc674d6-8c77-4481-b022-c91d7c77ec6e" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.200619 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.208817 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.209810 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.210011 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.210500 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.210742 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.211169 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.213189 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p"] Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.381983 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.382635 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.382809 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.382913 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.382970 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8tmb\" (UniqueName: \"kubernetes.io/projected/d991fbd4-087c-475f-99cb-ccfab86bda67-kube-api-access-j8tmb\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.383125 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.484676 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.484805 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.484857 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.484890 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.484921 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8tmb\" (UniqueName: \"kubernetes.io/projected/d991fbd4-087c-475f-99cb-ccfab86bda67-kube-api-access-j8tmb\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.484985 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.490755 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.491044 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.491283 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.492140 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.494214 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.519593 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8tmb\" (UniqueName: \"kubernetes.io/projected/d991fbd4-087c-475f-99cb-ccfab86bda67-kube-api-access-j8tmb\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:41 crc kubenswrapper[4848]: I0128 13:23:41.524828 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:23:42 crc kubenswrapper[4848]: I0128 13:23:42.127968 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p"] Jan 28 13:23:43 crc kubenswrapper[4848]: I0128 13:23:43.163781 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" event={"ID":"d991fbd4-087c-475f-99cb-ccfab86bda67","Type":"ContainerStarted","Data":"793d7b5306fcb6afbf4628c38016aacb851d702fc37dbd6dd2274f57b111d74b"} Jan 28 13:23:44 crc kubenswrapper[4848]: I0128 13:23:44.177528 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" event={"ID":"d991fbd4-087c-475f-99cb-ccfab86bda67","Type":"ContainerStarted","Data":"eef3dbf21b93ea55905371114b294d407f7e664ad76f3907d925862aef539d07"} Jan 28 13:23:44 crc kubenswrapper[4848]: I0128 13:23:44.204818 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" podStartSLOduration=2.071620148 podStartE2EDuration="3.204782183s" podCreationTimestamp="2026-01-28 13:23:41 +0000 UTC" firstStartedPulling="2026-01-28 13:23:42.126962358 +0000 UTC m=+2249.039179396" lastFinishedPulling="2026-01-28 13:23:43.260124353 +0000 UTC m=+2250.172341431" observedRunningTime="2026-01-28 13:23:44.202558203 +0000 UTC m=+2251.114775241" watchObservedRunningTime="2026-01-28 13:23:44.204782183 +0000 UTC m=+2251.116999241" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.119013 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xdt8m"] Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.122577 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.134992 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xdt8m"] Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.197114 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-utilities\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.197190 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-catalog-content\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.197376 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rwrd\" (UniqueName: \"kubernetes.io/projected/2184b53f-80b5-4d24-9f69-2cafefe67f89-kube-api-access-8rwrd\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.299898 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-utilities\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.299949 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-catalog-content\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.300000 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rwrd\" (UniqueName: \"kubernetes.io/projected/2184b53f-80b5-4d24-9f69-2cafefe67f89-kube-api-access-8rwrd\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.301453 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-utilities\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.301568 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-catalog-content\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.325826 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8rwrd\" (UniqueName: \"kubernetes.io/projected/2184b53f-80b5-4d24-9f69-2cafefe67f89-kube-api-access-8rwrd\") pod \"community-operators-xdt8m\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:10 crc kubenswrapper[4848]: I0128 13:24:10.448543 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:11 crc kubenswrapper[4848]: I0128 13:24:11.088943 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xdt8m"] Jan 28 13:24:11 crc kubenswrapper[4848]: I0128 13:24:11.477350 4848 generic.go:334] "Generic (PLEG): container finished" podID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerID="b4f62769487569bc7b64ce1af1865d86ee2eba7b296066f67e88f606c81d2ad9" exitCode=0 Jan 28 13:24:11 crc kubenswrapper[4848]: I0128 13:24:11.477416 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerDied","Data":"b4f62769487569bc7b64ce1af1865d86ee2eba7b296066f67e88f606c81d2ad9"} Jan 28 13:24:11 crc kubenswrapper[4848]: I0128 13:24:11.477825 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerStarted","Data":"3a2ddcfcdb50f2a3d0eca76790dd22b6952226dcc0f05742410234e740214b1c"} Jan 28 13:24:12 crc kubenswrapper[4848]: I0128 13:24:12.492666 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerStarted","Data":"48296c8d4d7c83aa957a431cde9fd61bdb9f6d49015f11c56608756b37d80a23"} Jan 28 13:24:13 crc kubenswrapper[4848]: I0128 13:24:13.509669 4848 generic.go:334] "Generic (PLEG): container finished" podID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerID="48296c8d4d7c83aa957a431cde9fd61bdb9f6d49015f11c56608756b37d80a23" exitCode=0 Jan 28 13:24:13 crc kubenswrapper[4848]: I0128 13:24:13.509747 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerDied","Data":"48296c8d4d7c83aa957a431cde9fd61bdb9f6d49015f11c56608756b37d80a23"} Jan 28 13:24:14 crc kubenswrapper[4848]: I0128 13:24:14.522626 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerStarted","Data":"434d4c934e4133b34e7f7e0b949af22108362809d94e842d77a2e23c0917dd0e"} Jan 28 13:24:14 crc kubenswrapper[4848]: I0128 13:24:14.559484 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xdt8m" podStartSLOduration=2.124672087 podStartE2EDuration="4.559461458s" podCreationTimestamp="2026-01-28 13:24:10 +0000 UTC" firstStartedPulling="2026-01-28 13:24:11.479856494 +0000 UTC m=+2278.392073532" lastFinishedPulling="2026-01-28 13:24:13.914645865 +0000 UTC m=+2280.826862903" observedRunningTime="2026-01-28 13:24:14.552033405 +0000 UTC m=+2281.464250453" watchObservedRunningTime="2026-01-28 13:24:14.559461458 +0000 UTC m=+2281.471678496" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.492686 4848 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-lg2cp"] Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.499134 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.510360 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lg2cp"] Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.637014 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-catalog-content\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.637453 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-utilities\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.637708 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2l2t\" (UniqueName: \"kubernetes.io/projected/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-kube-api-access-k2l2t\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.740275 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-catalog-content\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.740651 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-utilities\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.740803 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2l2t\" (UniqueName: \"kubernetes.io/projected/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-kube-api-access-k2l2t\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.741979 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-catalog-content\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.742405 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-utilities\") pod \"certified-operators-lg2cp\" (UID: 
\"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.774817 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2l2t\" (UniqueName: \"kubernetes.io/projected/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-kube-api-access-k2l2t\") pod \"certified-operators-lg2cp\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:17 crc kubenswrapper[4848]: I0128 13:24:17.927021 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:18 crc kubenswrapper[4848]: I0128 13:24:18.531911 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lg2cp"] Jan 28 13:24:18 crc kubenswrapper[4848]: W0128 13:24:18.543427 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0cc0bfc_1ff5_456d_a3a5_3ab51b17af42.slice/crio-82526f05820788c41f4d367eb8acf8837be0501760b6e44f8b83f81828ecd70d WatchSource:0}: Error finding container 82526f05820788c41f4d367eb8acf8837be0501760b6e44f8b83f81828ecd70d: Status 404 returned error can't find the container with id 82526f05820788c41f4d367eb8acf8837be0501760b6e44f8b83f81828ecd70d Jan 28 13:24:18 crc kubenswrapper[4848]: I0128 13:24:18.568393 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerStarted","Data":"82526f05820788c41f4d367eb8acf8837be0501760b6e44f8b83f81828ecd70d"} Jan 28 13:24:19 crc kubenswrapper[4848]: I0128 13:24:19.585495 4848 generic.go:334] "Generic (PLEG): container finished" podID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerID="83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93" exitCode=0 Jan 28 13:24:19 crc kubenswrapper[4848]: I0128 13:24:19.585878 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerDied","Data":"83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93"} Jan 28 13:24:20 crc kubenswrapper[4848]: I0128 13:24:20.449329 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:20 crc kubenswrapper[4848]: I0128 13:24:20.449380 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:20 crc kubenswrapper[4848]: I0128 13:24:20.507580 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:20 crc kubenswrapper[4848]: I0128 13:24:20.648466 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:21 crc kubenswrapper[4848]: I0128 13:24:21.609845 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerStarted","Data":"dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0"} Jan 28 13:24:22 crc kubenswrapper[4848]: I0128 13:24:22.914130 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-xdt8m"] Jan 28 13:24:22 crc kubenswrapper[4848]: I0128 13:24:22.914691 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xdt8m" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="registry-server" containerID="cri-o://434d4c934e4133b34e7f7e0b949af22108362809d94e842d77a2e23c0917dd0e" gracePeriod=2 Jan 28 13:24:23 crc kubenswrapper[4848]: I0128 13:24:23.632846 4848 generic.go:334] "Generic (PLEG): container finished" podID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerID="dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0" exitCode=0 Jan 28 13:24:23 crc kubenswrapper[4848]: I0128 13:24:23.632962 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerDied","Data":"dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0"} Jan 28 13:24:23 crc kubenswrapper[4848]: I0128 13:24:23.639673 4848 generic.go:334] "Generic (PLEG): container finished" podID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerID="434d4c934e4133b34e7f7e0b949af22108362809d94e842d77a2e23c0917dd0e" exitCode=0 Jan 28 13:24:23 crc kubenswrapper[4848]: I0128 13:24:23.639722 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerDied","Data":"434d4c934e4133b34e7f7e0b949af22108362809d94e842d77a2e23c0917dd0e"} Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.288614 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.406297 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rwrd\" (UniqueName: \"kubernetes.io/projected/2184b53f-80b5-4d24-9f69-2cafefe67f89-kube-api-access-8rwrd\") pod \"2184b53f-80b5-4d24-9f69-2cafefe67f89\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.406379 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-catalog-content\") pod \"2184b53f-80b5-4d24-9f69-2cafefe67f89\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.406559 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-utilities\") pod \"2184b53f-80b5-4d24-9f69-2cafefe67f89\" (UID: \"2184b53f-80b5-4d24-9f69-2cafefe67f89\") " Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.407701 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-utilities" (OuterVolumeSpecName: "utilities") pod "2184b53f-80b5-4d24-9f69-2cafefe67f89" (UID: "2184b53f-80b5-4d24-9f69-2cafefe67f89"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.413555 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2184b53f-80b5-4d24-9f69-2cafefe67f89-kube-api-access-8rwrd" (OuterVolumeSpecName: "kube-api-access-8rwrd") pod "2184b53f-80b5-4d24-9f69-2cafefe67f89" (UID: "2184b53f-80b5-4d24-9f69-2cafefe67f89"). InnerVolumeSpecName "kube-api-access-8rwrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.508622 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.508663 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rwrd\" (UniqueName: \"kubernetes.io/projected/2184b53f-80b5-4d24-9f69-2cafefe67f89-kube-api-access-8rwrd\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.652563 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xdt8m" event={"ID":"2184b53f-80b5-4d24-9f69-2cafefe67f89","Type":"ContainerDied","Data":"3a2ddcfcdb50f2a3d0eca76790dd22b6952226dcc0f05742410234e740214b1c"} Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.652621 4848 scope.go:117] "RemoveContainer" containerID="434d4c934e4133b34e7f7e0b949af22108362809d94e842d77a2e23c0917dd0e" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.652768 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xdt8m" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.682865 4848 scope.go:117] "RemoveContainer" containerID="48296c8d4d7c83aa957a431cde9fd61bdb9f6d49015f11c56608756b37d80a23" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.719826 4848 scope.go:117] "RemoveContainer" containerID="b4f62769487569bc7b64ce1af1865d86ee2eba7b296066f67e88f606c81d2ad9" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.826348 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2184b53f-80b5-4d24-9f69-2cafefe67f89" (UID: "2184b53f-80b5-4d24-9f69-2cafefe67f89"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.919376 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2184b53f-80b5-4d24-9f69-2cafefe67f89-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.985228 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xdt8m"] Jan 28 13:24:24 crc kubenswrapper[4848]: I0128 13:24:24.993982 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xdt8m"] Jan 28 13:24:25 crc kubenswrapper[4848]: I0128 13:24:25.667165 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerStarted","Data":"97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2"} Jan 28 13:24:25 crc kubenswrapper[4848]: I0128 13:24:25.703227 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lg2cp" podStartSLOduration=3.272332402 podStartE2EDuration="8.703200367s" podCreationTimestamp="2026-01-28 13:24:17 +0000 UTC" firstStartedPulling="2026-01-28 13:24:19.591547026 +0000 UTC m=+2286.503764064" lastFinishedPulling="2026-01-28 13:24:25.022414991 +0000 UTC m=+2291.934632029" observedRunningTime="2026-01-28 13:24:25.694348746 +0000 UTC m=+2292.606565794" watchObservedRunningTime="2026-01-28 13:24:25.703200367 +0000 UTC m=+2292.615417405" Jan 28 13:24:26 crc kubenswrapper[4848]: I0128 13:24:26.871046 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" path="/var/lib/kubelet/pods/2184b53f-80b5-4d24-9f69-2cafefe67f89/volumes" Jan 28 13:24:27 crc kubenswrapper[4848]: I0128 13:24:27.927942 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:27 crc kubenswrapper[4848]: I0128 13:24:27.928353 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:27 crc kubenswrapper[4848]: I0128 13:24:27.992351 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:33 crc kubenswrapper[4848]: E0128 13:24:33.400211 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd991fbd4_087c_475f_99cb_ccfab86bda67.slice/crio-conmon-eef3dbf21b93ea55905371114b294d407f7e664ad76f3907d925862aef539d07.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd991fbd4_087c_475f_99cb_ccfab86bda67.slice/crio-eef3dbf21b93ea55905371114b294d407f7e664ad76f3907d925862aef539d07.scope\": RecentStats: unable to find data in memory cache]" Jan 28 13:24:33 crc kubenswrapper[4848]: I0128 13:24:33.787663 4848 generic.go:334] "Generic (PLEG): container finished" podID="d991fbd4-087c-475f-99cb-ccfab86bda67" containerID="eef3dbf21b93ea55905371114b294d407f7e664ad76f3907d925862aef539d07" exitCode=0 Jan 28 13:24:33 crc kubenswrapper[4848]: I0128 13:24:33.787717 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" event={"ID":"d991fbd4-087c-475f-99cb-ccfab86bda67","Type":"ContainerDied","Data":"eef3dbf21b93ea55905371114b294d407f7e664ad76f3907d925862aef539d07"} Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.312665 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.381099 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-ovn-metadata-agent-neutron-config-0\") pod \"d991fbd4-087c-475f-99cb-ccfab86bda67\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.381188 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-inventory\") pod \"d991fbd4-087c-475f-99cb-ccfab86bda67\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.381227 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-metadata-combined-ca-bundle\") pod \"d991fbd4-087c-475f-99cb-ccfab86bda67\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.381296 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-ssh-key-openstack-edpm-ipam\") pod \"d991fbd4-087c-475f-99cb-ccfab86bda67\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.381359 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8tmb\" (UniqueName: \"kubernetes.io/projected/d991fbd4-087c-475f-99cb-ccfab86bda67-kube-api-access-j8tmb\") pod \"d991fbd4-087c-475f-99cb-ccfab86bda67\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.381480 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-nova-metadata-neutron-config-0\") pod \"d991fbd4-087c-475f-99cb-ccfab86bda67\" (UID: \"d991fbd4-087c-475f-99cb-ccfab86bda67\") " Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.394445 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d991fbd4-087c-475f-99cb-ccfab86bda67-kube-api-access-j8tmb" (OuterVolumeSpecName: "kube-api-access-j8tmb") pod "d991fbd4-087c-475f-99cb-ccfab86bda67" (UID: "d991fbd4-087c-475f-99cb-ccfab86bda67"). InnerVolumeSpecName "kube-api-access-j8tmb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.398686 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "d991fbd4-087c-475f-99cb-ccfab86bda67" (UID: "d991fbd4-087c-475f-99cb-ccfab86bda67"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.415693 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "d991fbd4-087c-475f-99cb-ccfab86bda67" (UID: "d991fbd4-087c-475f-99cb-ccfab86bda67"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.417514 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "d991fbd4-087c-475f-99cb-ccfab86bda67" (UID: "d991fbd4-087c-475f-99cb-ccfab86bda67"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.422296 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d991fbd4-087c-475f-99cb-ccfab86bda67" (UID: "d991fbd4-087c-475f-99cb-ccfab86bda67"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.440596 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-inventory" (OuterVolumeSpecName: "inventory") pod "d991fbd4-087c-475f-99cb-ccfab86bda67" (UID: "d991fbd4-087c-475f-99cb-ccfab86bda67"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.484602 4848 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.484684 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.484701 4848 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.484722 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.484735 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8tmb\" (UniqueName: \"kubernetes.io/projected/d991fbd4-087c-475f-99cb-ccfab86bda67-kube-api-access-j8tmb\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.484744 4848 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d991fbd4-087c-475f-99cb-ccfab86bda67-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.813118 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" event={"ID":"d991fbd4-087c-475f-99cb-ccfab86bda67","Type":"ContainerDied","Data":"793d7b5306fcb6afbf4628c38016aacb851d702fc37dbd6dd2274f57b111d74b"} Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.813177 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="793d7b5306fcb6afbf4628c38016aacb851d702fc37dbd6dd2274f57b111d74b" Jan 28 13:24:35 crc kubenswrapper[4848]: I0128 13:24:35.813272 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.032529 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf"] Jan 28 13:24:36 crc kubenswrapper[4848]: E0128 13:24:36.033044 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="extract-utilities" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.033065 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="extract-utilities" Jan 28 13:24:36 crc kubenswrapper[4848]: E0128 13:24:36.033095 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="extract-content" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.033103 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="extract-content" Jan 28 13:24:36 crc kubenswrapper[4848]: E0128 13:24:36.033114 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d991fbd4-087c-475f-99cb-ccfab86bda67" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.033122 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d991fbd4-087c-475f-99cb-ccfab86bda67" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 13:24:36 crc kubenswrapper[4848]: E0128 13:24:36.033140 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="registry-server" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.033146 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="registry-server" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.033361 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="d991fbd4-087c-475f-99cb-ccfab86bda67" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.033375 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="2184b53f-80b5-4d24-9f69-2cafefe67f89" containerName="registry-server" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.034159 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.039457 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.039664 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.039729 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.040213 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.040518 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.055949 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf"] Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.098079 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.098588 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st7b7\" (UniqueName: \"kubernetes.io/projected/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-kube-api-access-st7b7\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.098643 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.098876 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.099221 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.201620 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.201776 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.201890 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.201929 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st7b7\" (UniqueName: \"kubernetes.io/projected/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-kube-api-access-st7b7\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.201984 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.208663 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.209377 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.214478 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.215308 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.225781 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st7b7\" (UniqueName: \"kubernetes.io/projected/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-kube-api-access-st7b7\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-thzjf\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.362178 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:24:36 crc kubenswrapper[4848]: I0128 13:24:36.953552 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf"] Jan 28 13:24:37 crc kubenswrapper[4848]: I0128 13:24:37.836886 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" event={"ID":"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8","Type":"ContainerStarted","Data":"2c97e8a7827877fd6188c601c2ee44e3a7681cc0b98209be6c66da3b9bbc048e"} Jan 28 13:24:37 crc kubenswrapper[4848]: I0128 13:24:37.837345 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" event={"ID":"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8","Type":"ContainerStarted","Data":"256f6eae41414c7c8f182f91c6efc5f91cd70906593ee36db44711656cf20304"} Jan 28 13:24:37 crc kubenswrapper[4848]: I0128 13:24:37.858996 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" podStartSLOduration=1.428079031 podStartE2EDuration="1.858964634s" podCreationTimestamp="2026-01-28 13:24:36 +0000 UTC" firstStartedPulling="2026-01-28 13:24:36.962889431 +0000 UTC m=+2303.875106469" lastFinishedPulling="2026-01-28 13:24:37.393775034 +0000 UTC m=+2304.305992072" observedRunningTime="2026-01-28 13:24:37.852710983 +0000 UTC m=+2304.764928031" watchObservedRunningTime="2026-01-28 13:24:37.858964634 +0000 UTC m=+2304.771181672" Jan 28 13:24:37 crc kubenswrapper[4848]: I0128 13:24:37.992085 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:38 crc kubenswrapper[4848]: I0128 13:24:38.063842 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lg2cp"] Jan 28 13:24:38 crc kubenswrapper[4848]: I0128 13:24:38.855568 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lg2cp" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="registry-server" containerID="cri-o://97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2" gracePeriod=2 Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.393433 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.486874 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-utilities\") pod \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.486976 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2l2t\" (UniqueName: \"kubernetes.io/projected/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-kube-api-access-k2l2t\") pod \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.487132 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-catalog-content\") pod \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\" (UID: \"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42\") " Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.487932 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-utilities" (OuterVolumeSpecName: "utilities") pod "a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" (UID: "a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.492052 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.495364 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-kube-api-access-k2l2t" (OuterVolumeSpecName: "kube-api-access-k2l2t") pod "a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" (UID: "a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42"). InnerVolumeSpecName "kube-api-access-k2l2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.545618 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" (UID: "a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.596536 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.596606 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2l2t\" (UniqueName: \"kubernetes.io/projected/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42-kube-api-access-k2l2t\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.870803 4848 generic.go:334] "Generic (PLEG): container finished" podID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerID="97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2" exitCode=0 Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.870874 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerDied","Data":"97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2"} Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.870921 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cp" event={"ID":"a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42","Type":"ContainerDied","Data":"82526f05820788c41f4d367eb8acf8837be0501760b6e44f8b83f81828ecd70d"} Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.870947 4848 scope.go:117] "RemoveContainer" containerID="97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.871015 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cp" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.905183 4848 scope.go:117] "RemoveContainer" containerID="dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.935343 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lg2cp"] Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.951090 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lg2cp"] Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.956220 4848 scope.go:117] "RemoveContainer" containerID="83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.996395 4848 scope.go:117] "RemoveContainer" containerID="97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2" Jan 28 13:24:39 crc kubenswrapper[4848]: E0128 13:24:39.997063 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2\": container with ID starting with 97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2 not found: ID does not exist" containerID="97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.997137 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2"} err="failed to get container status \"97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2\": rpc error: code = NotFound desc = could not find container \"97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2\": container with ID starting with 97b23c802290250579bd4ce7e498ecf4693dc9ed0e27c0bc60500be1377611c2 not found: ID does not exist" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.997183 4848 scope.go:117] "RemoveContainer" containerID="dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0" Jan 28 13:24:39 crc kubenswrapper[4848]: E0128 13:24:39.997990 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0\": container with ID starting with dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0 not found: ID does not exist" containerID="dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.998037 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0"} err="failed to get container status \"dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0\": rpc error: code = NotFound desc = could not find container \"dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0\": container with ID starting with dd8eb3098151ee0e5a67c4020fd8ed74a3715efc2c07b9bd2cbf2047109729a0 not found: ID does not exist" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.998071 4848 scope.go:117] "RemoveContainer" containerID="83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93" Jan 28 13:24:39 crc kubenswrapper[4848]: E0128 13:24:39.998472 4848 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93\": container with ID starting with 83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93 not found: ID does not exist" containerID="83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93" Jan 28 13:24:39 crc kubenswrapper[4848]: I0128 13:24:39.998502 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93"} err="failed to get container status \"83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93\": rpc error: code = NotFound desc = could not find container \"83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93\": container with ID starting with 83fb84ec78f6bba3515b059b46af9989c632c2e19fbed76eeb720dc1a85daa93 not found: ID does not exist" Jan 28 13:24:40 crc kubenswrapper[4848]: I0128 13:24:40.871110 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" path="/var/lib/kubelet/pods/a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42/volumes" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.827557 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6fv5l"] Jan 28 13:24:43 crc kubenswrapper[4848]: E0128 13:24:43.828471 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="registry-server" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.828488 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="registry-server" Jan 28 13:24:43 crc kubenswrapper[4848]: E0128 13:24:43.828506 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="extract-content" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.828513 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="extract-content" Jan 28 13:24:43 crc kubenswrapper[4848]: E0128 13:24:43.828524 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="extract-utilities" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.828532 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="extract-utilities" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.828713 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0cc0bfc-1ff5-456d-a3a5-3ab51b17af42" containerName="registry-server" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.831121 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.854228 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6fv5l"] Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.900427 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-utilities\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.900544 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48b8j\" (UniqueName: \"kubernetes.io/projected/81fbab8d-cadd-45ca-9b98-79a80cb632c8-kube-api-access-48b8j\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:43 crc kubenswrapper[4848]: I0128 13:24:43.900645 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-catalog-content\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.002975 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-utilities\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.003076 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48b8j\" (UniqueName: \"kubernetes.io/projected/81fbab8d-cadd-45ca-9b98-79a80cb632c8-kube-api-access-48b8j\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.003154 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-catalog-content\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.003732 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-utilities\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.003829 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-catalog-content\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.027660 4848 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-48b8j\" (UniqueName: \"kubernetes.io/projected/81fbab8d-cadd-45ca-9b98-79a80cb632c8-kube-api-access-48b8j\") pod \"redhat-marketplace-6fv5l\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.163499 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.671973 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6fv5l"] Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.962573 4848 generic.go:334] "Generic (PLEG): container finished" podID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerID="07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e" exitCode=0 Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.962628 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerDied","Data":"07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e"} Jan 28 13:24:44 crc kubenswrapper[4848]: I0128 13:24:44.962665 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerStarted","Data":"1ae3317affbbb1d1e70b8ffeb12bce1dabb09471ab8d1e8eada1794d9aeb4474"} Jan 28 13:24:45 crc kubenswrapper[4848]: I0128 13:24:45.980390 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerStarted","Data":"cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2"} Jan 28 13:24:47 crc kubenswrapper[4848]: I0128 13:24:47.013672 4848 generic.go:334] "Generic (PLEG): container finished" podID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerID="cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2" exitCode=0 Jan 28 13:24:47 crc kubenswrapper[4848]: I0128 13:24:47.013769 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerDied","Data":"cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2"} Jan 28 13:24:48 crc kubenswrapper[4848]: I0128 13:24:48.034985 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerStarted","Data":"969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9"} Jan 28 13:24:48 crc kubenswrapper[4848]: I0128 13:24:48.073964 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6fv5l" podStartSLOduration=2.264578371 podStartE2EDuration="5.073937008s" podCreationTimestamp="2026-01-28 13:24:43 +0000 UTC" firstStartedPulling="2026-01-28 13:24:44.968624171 +0000 UTC m=+2311.880841219" lastFinishedPulling="2026-01-28 13:24:47.777982808 +0000 UTC m=+2314.690199856" observedRunningTime="2026-01-28 13:24:48.068438837 +0000 UTC m=+2314.980655875" watchObservedRunningTime="2026-01-28 13:24:48.073937008 +0000 UTC m=+2314.986154046" Jan 28 13:24:54 crc kubenswrapper[4848]: I0128 13:24:54.163640 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:54 crc kubenswrapper[4848]: I0128 13:24:54.165371 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:54 crc kubenswrapper[4848]: I0128 13:24:54.217688 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:55 crc kubenswrapper[4848]: I0128 13:24:55.185057 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:55 crc kubenswrapper[4848]: I0128 13:24:55.246063 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6fv5l"] Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.153206 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6fv5l" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="registry-server" containerID="cri-o://969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9" gracePeriod=2 Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.677387 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.755635 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48b8j\" (UniqueName: \"kubernetes.io/projected/81fbab8d-cadd-45ca-9b98-79a80cb632c8-kube-api-access-48b8j\") pod \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.755788 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-utilities\") pod \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.755922 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-catalog-content\") pod \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\" (UID: \"81fbab8d-cadd-45ca-9b98-79a80cb632c8\") " Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.756762 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-utilities" (OuterVolumeSpecName: "utilities") pod "81fbab8d-cadd-45ca-9b98-79a80cb632c8" (UID: "81fbab8d-cadd-45ca-9b98-79a80cb632c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.766963 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81fbab8d-cadd-45ca-9b98-79a80cb632c8-kube-api-access-48b8j" (OuterVolumeSpecName: "kube-api-access-48b8j") pod "81fbab8d-cadd-45ca-9b98-79a80cb632c8" (UID: "81fbab8d-cadd-45ca-9b98-79a80cb632c8"). InnerVolumeSpecName "kube-api-access-48b8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.793052 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81fbab8d-cadd-45ca-9b98-79a80cb632c8" (UID: "81fbab8d-cadd-45ca-9b98-79a80cb632c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.858618 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.858653 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48b8j\" (UniqueName: \"kubernetes.io/projected/81fbab8d-cadd-45ca-9b98-79a80cb632c8-kube-api-access-48b8j\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:57 crc kubenswrapper[4848]: I0128 13:24:57.858667 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab8d-cadd-45ca-9b98-79a80cb632c8-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.168874 4848 generic.go:334] "Generic (PLEG): container finished" podID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerID="969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9" exitCode=0 Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.168917 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6fv5l" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.168945 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerDied","Data":"969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9"} Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.168990 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6fv5l" event={"ID":"81fbab8d-cadd-45ca-9b98-79a80cb632c8","Type":"ContainerDied","Data":"1ae3317affbbb1d1e70b8ffeb12bce1dabb09471ab8d1e8eada1794d9aeb4474"} Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.169021 4848 scope.go:117] "RemoveContainer" containerID="969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.230542 4848 scope.go:117] "RemoveContainer" containerID="cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.231167 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6fv5l"] Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.247139 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6fv5l"] Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.264476 4848 scope.go:117] "RemoveContainer" containerID="07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.340774 4848 scope.go:117] "RemoveContainer" containerID="969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9" Jan 28 13:24:58 crc kubenswrapper[4848]: E0128 13:24:58.341550 4848 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9\": container with ID starting with 969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9 not found: ID does not exist" containerID="969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.341887 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9"} err="failed to get container status \"969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9\": rpc error: code = NotFound desc = could not find container \"969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9\": container with ID starting with 969e62d3ee0a65ce275f76b847edb5050c998a62a534749518b5027cbb57b9f9 not found: ID does not exist" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.342078 4848 scope.go:117] "RemoveContainer" containerID="cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2" Jan 28 13:24:58 crc kubenswrapper[4848]: E0128 13:24:58.342947 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2\": container with ID starting with cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2 not found: ID does not exist" containerID="cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.343021 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2"} err="failed to get container status \"cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2\": rpc error: code = NotFound desc = could not find container \"cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2\": container with ID starting with cb1809a459381f7bf24542fd0a10dcb301b230261832b9a64f40ef7dcb5f10f2 not found: ID does not exist" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.343071 4848 scope.go:117] "RemoveContainer" containerID="07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e" Jan 28 13:24:58 crc kubenswrapper[4848]: E0128 13:24:58.343748 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e\": container with ID starting with 07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e not found: ID does not exist" containerID="07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.343786 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e"} err="failed to get container status \"07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e\": rpc error: code = NotFound desc = could not find container \"07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e\": container with ID starting with 07b6e518f5c8d40180c3d8ec9b72e025def2031c1b372db0822be03f2bb0435e not found: ID does not exist" Jan 28 13:24:58 crc kubenswrapper[4848]: I0128 13:24:58.868534 4848 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" path="/var/lib/kubelet/pods/81fbab8d-cadd-45ca-9b98-79a80cb632c8/volumes" Jan 28 13:25:37 crc kubenswrapper[4848]: I0128 13:25:37.924382 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:25:37 crc kubenswrapper[4848]: I0128 13:25:37.925470 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:26:07 crc kubenswrapper[4848]: I0128 13:26:07.924352 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:26:07 crc kubenswrapper[4848]: I0128 13:26:07.925159 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:26:37 crc kubenswrapper[4848]: I0128 13:26:37.925356 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:26:37 crc kubenswrapper[4848]: I0128 13:26:37.926000 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:26:37 crc kubenswrapper[4848]: I0128 13:26:37.926061 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:26:37 crc kubenswrapper[4848]: I0128 13:26:37.927195 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:26:37 crc kubenswrapper[4848]: I0128 13:26:37.927307 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" gracePeriod=600 Jan 28 13:26:38 crc kubenswrapper[4848]: E0128 13:26:38.083497 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:26:38 crc kubenswrapper[4848]: I0128 13:26:38.514752 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" exitCode=0 Jan 28 13:26:38 crc kubenswrapper[4848]: I0128 13:26:38.514810 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"} Jan 28 13:26:38 crc kubenswrapper[4848]: I0128 13:26:38.514859 4848 scope.go:117] "RemoveContainer" containerID="07611487a31b7c45ba47ed64a959f661cead56f7ba8c4db44c7b948853391684" Jan 28 13:26:38 crc kubenswrapper[4848]: I0128 13:26:38.515548 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:26:38 crc kubenswrapper[4848]: E0128 13:26:38.515831 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:26:48 crc kubenswrapper[4848]: I0128 13:26:48.850747 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:26:48 crc kubenswrapper[4848]: E0128 13:26:48.851836 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:27:01 crc kubenswrapper[4848]: I0128 13:27:01.850119 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:27:01 crc kubenswrapper[4848]: E0128 13:27:01.852117 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:27:16 crc kubenswrapper[4848]: I0128 13:27:16.850436 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:27:16 crc kubenswrapper[4848]: E0128 13:27:16.851552 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:27:28 crc kubenswrapper[4848]: I0128 13:27:28.851632 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:27:28 crc kubenswrapper[4848]: E0128 13:27:28.853118 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:27:43 crc kubenswrapper[4848]: I0128 13:27:43.850942 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:27:43 crc kubenswrapper[4848]: E0128 13:27:43.852169 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:27:58 crc kubenswrapper[4848]: I0128 13:27:58.851005 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:27:58 crc kubenswrapper[4848]: E0128 13:27:58.852443 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:28:13 crc kubenswrapper[4848]: I0128 13:28:13.851541 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:28:13 crc kubenswrapper[4848]: E0128 13:28:13.853081 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:28:26 crc kubenswrapper[4848]: I0128 13:28:26.850871 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:28:26 crc kubenswrapper[4848]: E0128 13:28:26.851804 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\""
pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:28:40 crc kubenswrapper[4848]: I0128 13:28:40.852324 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:28:40 crc kubenswrapper[4848]: E0128 13:28:40.853312 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:28:47 crc kubenswrapper[4848]: I0128 13:28:47.912746 4848 generic.go:334] "Generic (PLEG): container finished" podID="d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" containerID="2c97e8a7827877fd6188c601c2ee44e3a7681cc0b98209be6c66da3b9bbc048e" exitCode=0 Jan 28 13:28:47 crc kubenswrapper[4848]: I0128 13:28:47.912847 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" event={"ID":"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8","Type":"ContainerDied","Data":"2c97e8a7827877fd6188c601c2ee44e3a7681cc0b98209be6c66da3b9bbc048e"} Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.467232 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.668082 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-secret-0\") pod \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.668976 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-combined-ca-bundle\") pod \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.669074 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st7b7\" (UniqueName: \"kubernetes.io/projected/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-kube-api-access-st7b7\") pod \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.669123 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-inventory\") pod \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.669229 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-ssh-key-openstack-edpm-ipam\") pod \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\" (UID: \"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8\") " Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.677283 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" (UID: "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.680483 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-kube-api-access-st7b7" (OuterVolumeSpecName: "kube-api-access-st7b7") pod "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" (UID: "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8"). InnerVolumeSpecName "kube-api-access-st7b7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.706159 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" (UID: "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.706650 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" (UID: "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.706673 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-inventory" (OuterVolumeSpecName: "inventory") pod "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" (UID: "d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.773688 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st7b7\" (UniqueName: \"kubernetes.io/projected/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-kube-api-access-st7b7\") on node \"crc\" DevicePath \"\"" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.773852 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.773946 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.774034 4848 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.774120 4848 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.943121 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" event={"ID":"d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8","Type":"ContainerDied","Data":"256f6eae41414c7c8f182f91c6efc5f91cd70906593ee36db44711656cf20304"} Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.943689 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="256f6eae41414c7c8f182f91c6efc5f91cd70906593ee36db44711656cf20304" Jan 28 13:28:49 crc kubenswrapper[4848]: I0128 13:28:49.943446 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-thzjf" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.075681 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n"] Jan 28 13:28:50 crc kubenswrapper[4848]: E0128 13:28:50.077348 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="extract-utilities" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.077374 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="extract-utilities" Jan 28 13:28:50 crc kubenswrapper[4848]: E0128 13:28:50.077404 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="registry-server" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.077412 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="registry-server" Jan 28 13:28:50 crc kubenswrapper[4848]: E0128 13:28:50.077422 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="extract-content" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.077429 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="extract-content" Jan 28 13:28:50 crc kubenswrapper[4848]: E0128 13:28:50.077437 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.077445 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.077633 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="81fbab8d-cadd-45ca-9b98-79a80cb632c8" containerName="registry-server" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.077655 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.078395 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.086841 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.087816 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.087926 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.088028 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.088389 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.088515 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.088743 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.090765 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.090814 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.090861 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.090913 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.090936 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.090996 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.091226 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.091262 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qr6x\" (UniqueName: \"kubernetes.io/projected/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-kube-api-access-7qr6x\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.091302 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.093206 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n"] Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.193383 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.193913 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.194068 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.195593 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.195686 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.195836 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.195940 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.196015 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qr6x\" (UniqueName: \"kubernetes.io/projected/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-kube-api-access-7qr6x\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.196128 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.198692 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.198761 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.199936 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: 
\"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.201069 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.202628 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.204379 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.210763 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.214919 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.216011 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qr6x\" (UniqueName: \"kubernetes.io/projected/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-kube-api-access-7qr6x\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhf8n\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.399538 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.970155 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n"] Jan 28 13:28:50 crc kubenswrapper[4848]: I0128 13:28:50.979773 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:28:51 crc kubenswrapper[4848]: I0128 13:28:51.963686 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" event={"ID":"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f","Type":"ContainerStarted","Data":"cd1ebe480df4baf81052f2d4ef58a1b3b472198f230cabc7c4ea55e0b2351239"} Jan 28 13:28:52 crc kubenswrapper[4848]: I0128 13:28:52.977999 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" event={"ID":"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f","Type":"ContainerStarted","Data":"690119869cf1c47d0d5becf08c8320634a8d1e18c2eb327a62aced1054237d49"} Jan 28 13:28:53 crc kubenswrapper[4848]: I0128 13:28:53.006360 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" podStartSLOduration=1.9950206069999998 podStartE2EDuration="3.006338044s" podCreationTimestamp="2026-01-28 13:28:50 +0000 UTC" firstStartedPulling="2026-01-28 13:28:50.979548686 +0000 UTC m=+2557.891765714" lastFinishedPulling="2026-01-28 13:28:51.990866113 +0000 UTC m=+2558.903083151" observedRunningTime="2026-01-28 13:28:53.004761432 +0000 UTC m=+2559.916978510" watchObservedRunningTime="2026-01-28 13:28:53.006338044 +0000 UTC m=+2559.918555082" Jan 28 13:28:54 crc kubenswrapper[4848]: I0128 13:28:54.860676 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:28:54 crc kubenswrapper[4848]: E0128 13:28:54.861313 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:29:06 crc kubenswrapper[4848]: I0128 13:29:06.859716 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:29:06 crc kubenswrapper[4848]: E0128 13:29:06.861515 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:29:20 crc kubenswrapper[4848]: I0128 13:29:20.850971 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:29:20 crc kubenswrapper[4848]: E0128 13:29:20.852131 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:29:35 crc kubenswrapper[4848]: I0128 13:29:35.850346 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:29:35 crc kubenswrapper[4848]: E0128 13:29:35.851673 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:29:50 crc kubenswrapper[4848]: I0128 13:29:50.851267 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:29:50 crc kubenswrapper[4848]: E0128 13:29:50.852392 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.164617 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz"]
Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.167211 4848 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.171438 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.171438 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.181129 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz"] Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.264807 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvfjx\" (UniqueName: \"kubernetes.io/projected/9d52ea87-50d4-46fc-a882-2a2966210069-kube-api-access-fvfjx\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.264866 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d52ea87-50d4-46fc-a882-2a2966210069-config-volume\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.265007 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d52ea87-50d4-46fc-a882-2a2966210069-secret-volume\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.368144 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d52ea87-50d4-46fc-a882-2a2966210069-secret-volume\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.368399 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvfjx\" (UniqueName: \"kubernetes.io/projected/9d52ea87-50d4-46fc-a882-2a2966210069-kube-api-access-fvfjx\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.368454 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d52ea87-50d4-46fc-a882-2a2966210069-config-volume\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.371625 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d52ea87-50d4-46fc-a882-2a2966210069-config-volume\") pod 
\"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.393647 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d52ea87-50d4-46fc-a882-2a2966210069-secret-volume\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.409158 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvfjx\" (UniqueName: \"kubernetes.io/projected/9d52ea87-50d4-46fc-a882-2a2966210069-kube-api-access-fvfjx\") pod \"collect-profiles-29493450-ktmsz\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:00 crc kubenswrapper[4848]: I0128 13:30:00.504703 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:01 crc kubenswrapper[4848]: I0128 13:30:01.034668 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz"] Jan 28 13:30:01 crc kubenswrapper[4848]: I0128 13:30:01.850729 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:30:01 crc kubenswrapper[4848]: E0128 13:30:01.853355 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:30:01 crc kubenswrapper[4848]: I0128 13:30:01.905998 4848 generic.go:334] "Generic (PLEG): container finished" podID="9d52ea87-50d4-46fc-a882-2a2966210069" containerID="0ec6a5cd9b9a54715b8c38fc657f0bbf60bd62dc68d71b2e847fe9d27cfb84b3" exitCode=0 Jan 28 13:30:01 crc kubenswrapper[4848]: I0128 13:30:01.906063 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" event={"ID":"9d52ea87-50d4-46fc-a882-2a2966210069","Type":"ContainerDied","Data":"0ec6a5cd9b9a54715b8c38fc657f0bbf60bd62dc68d71b2e847fe9d27cfb84b3"} Jan 28 13:30:01 crc kubenswrapper[4848]: I0128 13:30:01.906099 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" event={"ID":"9d52ea87-50d4-46fc-a882-2a2966210069","Type":"ContainerStarted","Data":"361d2fbf90422e2457faa8c14eee8ad08b8d03da1c7006bad71b8d301e593f28"} Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.332892 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.451208 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d52ea87-50d4-46fc-a882-2a2966210069-config-volume\") pod \"9d52ea87-50d4-46fc-a882-2a2966210069\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.451335 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvfjx\" (UniqueName: \"kubernetes.io/projected/9d52ea87-50d4-46fc-a882-2a2966210069-kube-api-access-fvfjx\") pod \"9d52ea87-50d4-46fc-a882-2a2966210069\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.451501 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d52ea87-50d4-46fc-a882-2a2966210069-secret-volume\") pod \"9d52ea87-50d4-46fc-a882-2a2966210069\" (UID: \"9d52ea87-50d4-46fc-a882-2a2966210069\") " Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.452707 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d52ea87-50d4-46fc-a882-2a2966210069-config-volume" (OuterVolumeSpecName: "config-volume") pod "9d52ea87-50d4-46fc-a882-2a2966210069" (UID: "9d52ea87-50d4-46fc-a882-2a2966210069"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.459417 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d52ea87-50d4-46fc-a882-2a2966210069-kube-api-access-fvfjx" (OuterVolumeSpecName: "kube-api-access-fvfjx") pod "9d52ea87-50d4-46fc-a882-2a2966210069" (UID: "9d52ea87-50d4-46fc-a882-2a2966210069"). InnerVolumeSpecName "kube-api-access-fvfjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.460004 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d52ea87-50d4-46fc-a882-2a2966210069-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9d52ea87-50d4-46fc-a882-2a2966210069" (UID: "9d52ea87-50d4-46fc-a882-2a2966210069"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.554946 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d52ea87-50d4-46fc-a882-2a2966210069-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.554992 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d52ea87-50d4-46fc-a882-2a2966210069-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.555003 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvfjx\" (UniqueName: \"kubernetes.io/projected/9d52ea87-50d4-46fc-a882-2a2966210069-kube-api-access-fvfjx\") on node \"crc\" DevicePath \"\"" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.934222 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" event={"ID":"9d52ea87-50d4-46fc-a882-2a2966210069","Type":"ContainerDied","Data":"361d2fbf90422e2457faa8c14eee8ad08b8d03da1c7006bad71b8d301e593f28"} Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.934327 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="361d2fbf90422e2457faa8c14eee8ad08b8d03da1c7006bad71b8d301e593f28" Jan 28 13:30:03 crc kubenswrapper[4848]: I0128 13:30:03.934334 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz" Jan 28 13:30:04 crc kubenswrapper[4848]: I0128 13:30:04.428773 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp"] Jan 28 13:30:04 crc kubenswrapper[4848]: I0128 13:30:04.439532 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-thrcp"] Jan 28 13:30:04 crc kubenswrapper[4848]: I0128 13:30:04.867205 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a78c59e1-105e-4581-a0bb-27c1d78dbdee" path="/var/lib/kubelet/pods/a78c59e1-105e-4581-a0bb-27c1d78dbdee/volumes" Jan 28 13:30:05 crc kubenswrapper[4848]: I0128 13:30:05.637861 4848 scope.go:117] "RemoveContainer" containerID="054562811f8e2b39573262f9893cce064e89de0b153147ea7ba0a362026b02be" Jan 28 13:30:14 crc kubenswrapper[4848]: I0128 13:30:14.871250 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:30:14 crc kubenswrapper[4848]: E0128 13:30:14.872453 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:30:27 crc kubenswrapper[4848]: I0128 13:30:27.850742 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:30:27 crc kubenswrapper[4848]: E0128 13:30:27.851924 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:30:41 crc kubenswrapper[4848]: I0128 13:30:41.851392 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:30:41 crc kubenswrapper[4848]: E0128 13:30:41.852541 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:30:56 crc kubenswrapper[4848]: I0128 13:30:56.850715 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:30:56 crc kubenswrapper[4848]: E0128 13:30:56.851801 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:31:10 crc kubenswrapper[4848]: I0128 13:31:10.850656 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e"
Jan 28 13:31:10 crc kubenswrapper[4848]: E0128 13:31:10.851565 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:31:18 crc kubenswrapper[4848]: I0128 13:31:18.814542 4848 generic.go:334] "Generic (PLEG): container finished" podID="15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" containerID="690119869cf1c47d0d5becf08c8320634a8d1e18c2eb327a62aced1054237d49" exitCode=0
Jan 28 13:31:18 crc kubenswrapper[4848]: I0128 13:31:18.814670 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" event={"ID":"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f","Type":"ContainerDied","Data":"690119869cf1c47d0d5becf08c8320634a8d1e18c2eb327a62aced1054237d49"}
Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.465444 4848 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.603013 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-0\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.603180 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-inventory\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.603236 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-ssh-key-openstack-edpm-ipam\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.603293 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-1\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.603403 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-1\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.603675 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-extra-config-0\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.604053 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-combined-ca-bundle\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.604104 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-0\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.604223 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qr6x\" (UniqueName: \"kubernetes.io/projected/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-kube-api-access-7qr6x\") pod \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\" (UID: \"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f\") " Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.620728 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.621476 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-kube-api-access-7qr6x" (OuterVolumeSpecName: "kube-api-access-7qr6x") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "kube-api-access-7qr6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.642856 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.646068 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.648829 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.662643 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-inventory" (OuterVolumeSpecName: "inventory") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.666045 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.666588 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.677992 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" (UID: "15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715071 4848 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715336 4848 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715353 4848 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715365 4848 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715393 4848 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715407 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qr6x\" (UniqueName: \"kubernetes.io/projected/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-kube-api-access-7qr6x\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715419 4848 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715433 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.715448 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.840201 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" event={"ID":"15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f","Type":"ContainerDied","Data":"cd1ebe480df4baf81052f2d4ef58a1b3b472198f230cabc7c4ea55e0b2351239"} Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.840580 4848 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="cd1ebe480df4baf81052f2d4ef58a1b3b472198f230cabc7c4ea55e0b2351239" Jan 28 13:31:20 crc kubenswrapper[4848]: I0128 13:31:20.840291 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhf8n" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.046329 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn"] Jan 28 13:31:21 crc kubenswrapper[4848]: E0128 13:31:21.053238 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.053303 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 28 13:31:21 crc kubenswrapper[4848]: E0128 13:31:21.053326 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d52ea87-50d4-46fc-a882-2a2966210069" containerName="collect-profiles" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.053337 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d52ea87-50d4-46fc-a882-2a2966210069" containerName="collect-profiles" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.053578 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d52ea87-50d4-46fc-a882-2a2966210069" containerName="collect-profiles" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.053601 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.054515 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.058587 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn"] Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.079120 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.079280 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.079660 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.079742 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.079919 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mf6tl" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.227347 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.227397 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.227429 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.227463 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmqzx\" (UniqueName: \"kubernetes.io/projected/42d08409-a571-40ac-968e-7ac9a5280841-kube-api-access-pmqzx\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.227672 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc 
kubenswrapper[4848]: I0128 13:31:21.228008 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.228063 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330479 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330543 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330566 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330595 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmqzx\" (UniqueName: \"kubernetes.io/projected/42d08409-a571-40ac-968e-7ac9a5280841-kube-api-access-pmqzx\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330642 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330705 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-1\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.330728 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.337297 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.338842 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.338891 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.340031 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.346071 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.355973 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.357959 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmqzx\" (UniqueName: 
\"kubernetes.io/projected/42d08409-a571-40ac-968e-7ac9a5280841-kube-api-access-pmqzx\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkctn\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.391734 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:31:21 crc kubenswrapper[4848]: I0128 13:31:21.850253 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:31:21 crc kubenswrapper[4848]: E0128 13:31:21.851366 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:31:22 crc kubenswrapper[4848]: I0128 13:31:22.059630 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn"] Jan 28 13:31:22 crc kubenswrapper[4848]: I0128 13:31:22.868551 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" event={"ID":"42d08409-a571-40ac-968e-7ac9a5280841","Type":"ContainerStarted","Data":"46db77dec7cfc91862749214fec65ee675e846cef96f85b2dc93a503dc322538"} Jan 28 13:31:23 crc kubenswrapper[4848]: I0128 13:31:23.881207 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" event={"ID":"42d08409-a571-40ac-968e-7ac9a5280841","Type":"ContainerStarted","Data":"7f92441ee336849b543e856c8f5cc614461c73950b6570627e130a47683135c2"} Jan 28 13:31:23 crc kubenswrapper[4848]: I0128 13:31:23.948937 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" podStartSLOduration=2.421090254 podStartE2EDuration="2.948898783s" podCreationTimestamp="2026-01-28 13:31:21 +0000 UTC" firstStartedPulling="2026-01-28 13:31:22.063397911 +0000 UTC m=+2708.975614949" lastFinishedPulling="2026-01-28 13:31:22.59120644 +0000 UTC m=+2709.503423478" observedRunningTime="2026-01-28 13:31:23.908502371 +0000 UTC m=+2710.820719409" watchObservedRunningTime="2026-01-28 13:31:23.948898783 +0000 UTC m=+2710.861115821" Jan 28 13:31:33 crc kubenswrapper[4848]: I0128 13:31:33.851008 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:31:33 crc kubenswrapper[4848]: E0128 13:31:33.852123 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.698479 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hjl86"] Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 
13:31:35.701375 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.712660 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hjl86"] Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.818217 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b5720cb-d35a-4b2b-8462-e18da80b34d0-catalog-content\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.818392 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b5720cb-d35a-4b2b-8462-e18da80b34d0-utilities\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.818554 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtgs9\" (UniqueName: \"kubernetes.io/projected/1b5720cb-d35a-4b2b-8462-e18da80b34d0-kube-api-access-qtgs9\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.920518 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b5720cb-d35a-4b2b-8462-e18da80b34d0-catalog-content\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.920622 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b5720cb-d35a-4b2b-8462-e18da80b34d0-utilities\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.920701 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtgs9\" (UniqueName: \"kubernetes.io/projected/1b5720cb-d35a-4b2b-8462-e18da80b34d0-kube-api-access-qtgs9\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.921363 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b5720cb-d35a-4b2b-8462-e18da80b34d0-catalog-content\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.921785 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b5720cb-d35a-4b2b-8462-e18da80b34d0-utilities\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:35 crc kubenswrapper[4848]: I0128 13:31:35.946113 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtgs9\" (UniqueName: \"kubernetes.io/projected/1b5720cb-d35a-4b2b-8462-e18da80b34d0-kube-api-access-qtgs9\") pod \"redhat-operators-hjl86\" (UID: \"1b5720cb-d35a-4b2b-8462-e18da80b34d0\") " pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:36 crc kubenswrapper[4848]: I0128 13:31:36.036821 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:36 crc kubenswrapper[4848]: I0128 13:31:36.616196 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hjl86"] Jan 28 13:31:37 crc kubenswrapper[4848]: I0128 13:31:37.026757 4848 generic.go:334] "Generic (PLEG): container finished" podID="1b5720cb-d35a-4b2b-8462-e18da80b34d0" containerID="3f245725ac9d18e2dddb9d94879c92f9246378000c00818c5afe73499f45fc01" exitCode=0 Jan 28 13:31:37 crc kubenswrapper[4848]: I0128 13:31:37.026862 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjl86" event={"ID":"1b5720cb-d35a-4b2b-8462-e18da80b34d0","Type":"ContainerDied","Data":"3f245725ac9d18e2dddb9d94879c92f9246378000c00818c5afe73499f45fc01"} Jan 28 13:31:37 crc kubenswrapper[4848]: I0128 13:31:37.028633 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjl86" event={"ID":"1b5720cb-d35a-4b2b-8462-e18da80b34d0","Type":"ContainerStarted","Data":"2540c5f2f532b03c58898af1a85f6abf366b110453dd59d5cb24db16122a8b3b"} Jan 28 13:31:45 crc kubenswrapper[4848]: I0128 13:31:45.852416 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:31:49 crc kubenswrapper[4848]: I0128 13:31:49.222463 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjl86" event={"ID":"1b5720cb-d35a-4b2b-8462-e18da80b34d0","Type":"ContainerStarted","Data":"213b759e7143d17b4dbdae8b8885d746b99c2323b17b0109c7d4574015d59d12"} Jan 28 13:31:49 crc kubenswrapper[4848]: I0128 13:31:49.229452 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"04fa88df5b6c08221521ccc691315473dbd0a3b2d5381aaf58b37daaf6f3cc2f"} Jan 28 13:31:53 crc kubenswrapper[4848]: I0128 13:31:53.274943 4848 generic.go:334] "Generic (PLEG): container finished" podID="1b5720cb-d35a-4b2b-8462-e18da80b34d0" containerID="213b759e7143d17b4dbdae8b8885d746b99c2323b17b0109c7d4574015d59d12" exitCode=0 Jan 28 13:31:53 crc kubenswrapper[4848]: I0128 13:31:53.275000 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjl86" event={"ID":"1b5720cb-d35a-4b2b-8462-e18da80b34d0","Type":"ContainerDied","Data":"213b759e7143d17b4dbdae8b8885d746b99c2323b17b0109c7d4574015d59d12"} Jan 28 13:31:54 crc kubenswrapper[4848]: I0128 13:31:54.328943 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hjl86" event={"ID":"1b5720cb-d35a-4b2b-8462-e18da80b34d0","Type":"ContainerStarted","Data":"4e7a164496a6d91ba289d8b2715a64c9859534598ebb0815abb7a8a8d09ce9e2"} Jan 28 13:31:54 crc kubenswrapper[4848]: I0128 13:31:54.354646 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hjl86" podStartSLOduration=2.675653306 
podStartE2EDuration="19.354609995s" podCreationTimestamp="2026-01-28 13:31:35 +0000 UTC" firstStartedPulling="2026-01-28 13:31:37.028944693 +0000 UTC m=+2723.941161721" lastFinishedPulling="2026-01-28 13:31:53.707901372 +0000 UTC m=+2740.620118410" observedRunningTime="2026-01-28 13:31:54.349697033 +0000 UTC m=+2741.261914061" watchObservedRunningTime="2026-01-28 13:31:54.354609995 +0000 UTC m=+2741.266827033" Jan 28 13:31:56 crc kubenswrapper[4848]: I0128 13:31:56.038272 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:56 crc kubenswrapper[4848]: I0128 13:31:56.038796 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:31:57 crc kubenswrapper[4848]: I0128 13:31:57.103605 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hjl86" podUID="1b5720cb-d35a-4b2b-8462-e18da80b34d0" containerName="registry-server" probeResult="failure" output=< Jan 28 13:31:57 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 13:31:57 crc kubenswrapper[4848]: > Jan 28 13:32:07 crc kubenswrapper[4848]: I0128 13:32:07.114553 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hjl86" podUID="1b5720cb-d35a-4b2b-8462-e18da80b34d0" containerName="registry-server" probeResult="failure" output=< Jan 28 13:32:07 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 13:32:07 crc kubenswrapper[4848]: > Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.119474 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.180444 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hjl86" Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.312040 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hjl86"] Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.369979 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jzvm9"] Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.370369 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jzvm9" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="registry-server" containerID="cri-o://018e6be662a04bd10a921eea5d1b9fc75818a3fa03a6123212a91e4042a12d4f" gracePeriod=2 Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.614099 4848 generic.go:334] "Generic (PLEG): container finished" podID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerID="018e6be662a04bd10a921eea5d1b9fc75818a3fa03a6123212a91e4042a12d4f" exitCode=0 Jan 28 13:32:16 crc kubenswrapper[4848]: I0128 13:32:16.615373 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerDied","Data":"018e6be662a04bd10a921eea5d1b9fc75818a3fa03a6123212a91e4042a12d4f"} Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.011664 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.121996 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-catalog-content\") pod \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.122072 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khvc5\" (UniqueName: \"kubernetes.io/projected/658ce371-1c32-4cb6-ab5c-9f67ed85353b-kube-api-access-khvc5\") pod \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.122464 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-utilities\") pod \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\" (UID: \"658ce371-1c32-4cb6-ab5c-9f67ed85353b\") " Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.125710 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-utilities" (OuterVolumeSpecName: "utilities") pod "658ce371-1c32-4cb6-ab5c-9f67ed85353b" (UID: "658ce371-1c32-4cb6-ab5c-9f67ed85353b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.141648 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/658ce371-1c32-4cb6-ab5c-9f67ed85353b-kube-api-access-khvc5" (OuterVolumeSpecName: "kube-api-access-khvc5") pod "658ce371-1c32-4cb6-ab5c-9f67ed85353b" (UID: "658ce371-1c32-4cb6-ab5c-9f67ed85353b"). InnerVolumeSpecName "kube-api-access-khvc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.225807 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khvc5\" (UniqueName: \"kubernetes.io/projected/658ce371-1c32-4cb6-ab5c-9f67ed85353b-kube-api-access-khvc5\") on node \"crc\" DevicePath \"\"" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.225845 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.255492 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "658ce371-1c32-4cb6-ab5c-9f67ed85353b" (UID: "658ce371-1c32-4cb6-ab5c-9f67ed85353b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.328423 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658ce371-1c32-4cb6-ab5c-9f67ed85353b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.627018 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jzvm9" event={"ID":"658ce371-1c32-4cb6-ab5c-9f67ed85353b","Type":"ContainerDied","Data":"0cde0fb010ffd9c4c488dd23782e150f00251df889dff723ecf0e1fdb26b161a"} Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.627098 4848 scope.go:117] "RemoveContainer" containerID="018e6be662a04bd10a921eea5d1b9fc75818a3fa03a6123212a91e4042a12d4f" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.627554 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jzvm9" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.662112 4848 scope.go:117] "RemoveContainer" containerID="596e37d805c65e880e443ca109bbaf1cb7bacd829a6f5d62ae970be0113a46e2" Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.670145 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jzvm9"] Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.682502 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jzvm9"] Jan 28 13:32:17 crc kubenswrapper[4848]: I0128 13:32:17.696758 4848 scope.go:117] "RemoveContainer" containerID="eef580e18af88bc5a8372506a007a2226ea45e135e9c2c4d9785071664997a59" Jan 28 13:32:18 crc kubenswrapper[4848]: I0128 13:32:18.865561 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" path="/var/lib/kubelet/pods/658ce371-1c32-4cb6-ab5c-9f67ed85353b/volumes" Jan 28 13:33:33 crc kubenswrapper[4848]: I0128 13:33:33.532483 4848 generic.go:334] "Generic (PLEG): container finished" podID="42d08409-a571-40ac-968e-7ac9a5280841" containerID="7f92441ee336849b543e856c8f5cc614461c73950b6570627e130a47683135c2" exitCode=0 Jan 28 13:33:33 crc kubenswrapper[4848]: I0128 13:33:33.532570 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" event={"ID":"42d08409-a571-40ac-968e-7ac9a5280841","Type":"ContainerDied","Data":"7f92441ee336849b543e856c8f5cc614461c73950b6570627e130a47683135c2"} Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.110172 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.147742 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-0\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.147812 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-inventory\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.147863 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-telemetry-combined-ca-bundle\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.147985 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ssh-key-openstack-edpm-ipam\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.148004 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-2\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.148067 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-1\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.148109 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmqzx\" (UniqueName: \"kubernetes.io/projected/42d08409-a571-40ac-968e-7ac9a5280841-kube-api-access-pmqzx\") pod \"42d08409-a571-40ac-968e-7ac9a5280841\" (UID: \"42d08409-a571-40ac-968e-7ac9a5280841\") " Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.171026 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42d08409-a571-40ac-968e-7ac9a5280841-kube-api-access-pmqzx" (OuterVolumeSpecName: "kube-api-access-pmqzx") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). InnerVolumeSpecName "kube-api-access-pmqzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.173418 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.184179 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.199790 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-inventory" (OuterVolumeSpecName: "inventory") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.200539 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.203587 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.230345 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "42d08409-a571-40ac-968e-7ac9a5280841" (UID: "42d08409-a571-40ac-968e-7ac9a5280841"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251172 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmqzx\" (UniqueName: \"kubernetes.io/projected/42d08409-a571-40ac-968e-7ac9a5280841-kube-api-access-pmqzx\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251220 4848 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251238 4848 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251266 4848 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251277 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251288 4848 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.251297 4848 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/42d08409-a571-40ac-968e-7ac9a5280841-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.559518 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" event={"ID":"42d08409-a571-40ac-968e-7ac9a5280841","Type":"ContainerDied","Data":"46db77dec7cfc91862749214fec65ee675e846cef96f85b2dc93a503dc322538"} Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.559565 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46db77dec7cfc91862749214fec65ee675e846cef96f85b2dc93a503dc322538" Jan 28 13:33:35 crc kubenswrapper[4848]: I0128 13:33:35.559658 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkctn" Jan 28 13:34:07 crc kubenswrapper[4848]: I0128 13:34:07.924804 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:34:07 crc kubenswrapper[4848]: I0128 13:34:07.925707 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.533930 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Jan 28 13:34:11 crc kubenswrapper[4848]: E0128 13:34:11.535283 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="registry-server" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.535298 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="registry-server" Jan 28 13:34:11 crc kubenswrapper[4848]: E0128 13:34:11.535324 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="extract-utilities" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.535330 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="extract-utilities" Jan 28 13:34:11 crc kubenswrapper[4848]: E0128 13:34:11.535343 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="extract-content" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.535350 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="extract-content" Jan 28 13:34:11 crc kubenswrapper[4848]: E0128 13:34:11.535369 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42d08409-a571-40ac-968e-7ac9a5280841" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.535377 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="42d08409-a571-40ac-968e-7ac9a5280841" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.535615 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="42d08409-a571-40ac-968e-7ac9a5280841" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.535660 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="658ce371-1c32-4cb6-ab5c-9f67ed85353b" containerName="registry-server" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.537093 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.540321 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.552672 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.665379 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-0"] Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.667675 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.670798 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-config-data" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.698880 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.711701 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-scripts\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.711783 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-dev\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712028 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712178 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2hms\" (UniqueName: \"kubernetes.io/projected/b4edacab-a671-4ace-8bb5-bd113d2c666b-kube-api-access-p2hms\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712220 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-config-data-custom\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712358 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-nvme\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712409 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: 
\"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712453 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712546 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-config-data\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712717 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712755 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-lib-modules\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712812 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712891 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712924 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-run\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.712970 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-sys\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.760676 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.763285 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.771028 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-2-config-data" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.782278 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815027 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-dev\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815120 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815183 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmf5t\" (UniqueName: \"kubernetes.io/projected/f434c780-9c6b-4fa2-b5a2-0220b134bb73-kube-api-access-tmf5t\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815204 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815225 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2hms\" (UniqueName: \"kubernetes.io/projected/b4edacab-a671-4ace-8bb5-bd113d2c666b-kube-api-access-p2hms\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815268 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-config-data-custom\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815304 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-nvme\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815329 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815347 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815369 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815388 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-sys\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815412 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-run\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815429 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-config-data\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815452 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815492 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815544 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815560 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815583 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " 
pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815601 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-lib-modules\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815622 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815652 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815672 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-run\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815693 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815718 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-sys\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815743 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815770 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815789 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815828 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-scripts\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815865 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815892 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-dev\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.815995 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-dev\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817026 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817111 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-nvme\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817330 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817379 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817513 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817577 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-sys\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817651 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: 
\"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-run\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817675 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-lib-modules\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.817883 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/b4edacab-a671-4ace-8bb5-bd113d2c666b-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.825975 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.830746 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-config-data-custom\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.832877 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-config-data\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.838232 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4edacab-a671-4ace-8bb5-bd113d2c666b-scripts\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.838740 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2hms\" (UniqueName: \"kubernetes.io/projected/b4edacab-a671-4ace-8bb5-bd113d2c666b-kube-api-access-p2hms\") pod \"cinder-backup-0\" (UID: \"b4edacab-a671-4ace-8bb5-bd113d2c666b\") " pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.904328 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918062 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918142 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918176 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-dev\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918223 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmf5t\" (UniqueName: \"kubernetes.io/projected/f434c780-9c6b-4fa2-b5a2-0220b134bb73-kube-api-access-tmf5t\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918246 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918301 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918330 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918348 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918376 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-sys\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918396 4848 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918416 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918460 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-run\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918480 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918503 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918528 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918537 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918557 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jkwb\" (UniqueName: \"kubernetes.io/projected/d004b545-6c1d-42f8-93cb-be2549026492-kube-api-access-9jkwb\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918668 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918707 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-dev\") pod \"cinder-volume-nfs-0\" (UID: 
\"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918737 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918768 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-sys\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918795 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918839 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918907 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-run\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.918995 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919042 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919090 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919141 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919164 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919770 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919815 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919888 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919922 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919956 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919974 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.919995 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.920015 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.920056 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: 
\"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.921631 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.921707 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f434c780-9c6b-4fa2-b5a2-0220b134bb73-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.923561 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.924188 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.927939 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.930807 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f434c780-9c6b-4fa2-b5a2-0220b134bb73-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.937777 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmf5t\" (UniqueName: \"kubernetes.io/projected/f434c780-9c6b-4fa2-b5a2-0220b134bb73-kube-api-access-tmf5t\") pod \"cinder-volume-nfs-0\" (UID: \"f434c780-9c6b-4fa2-b5a2-0220b134bb73\") " pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:11 crc kubenswrapper[4848]: I0128 13:34:11.984430 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026038 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026119 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026163 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026212 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026294 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026339 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026373 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026404 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026432 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026444 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" 
(UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026474 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026497 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026537 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jkwb\" (UniqueName: \"kubernetes.io/projected/d004b545-6c1d-42f8-93cb-be2549026492-kube-api-access-9jkwb\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026560 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026585 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026620 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026642 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026759 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.026867 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: 
I0128 13:34:12.029149 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.029241 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.029917 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.029966 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.030017 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.030079 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d004b545-6c1d-42f8-93cb-be2549026492-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.034914 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.036136 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.037779 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.039060 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d004b545-6c1d-42f8-93cb-be2549026492-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc 
kubenswrapper[4848]: I0128 13:34:12.059662 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jkwb\" (UniqueName: \"kubernetes.io/projected/d004b545-6c1d-42f8-93cb-be2549026492-kube-api-access-9jkwb\") pod \"cinder-volume-nfs-2-0\" (UID: \"d004b545-6c1d-42f8-93cb-be2549026492\") " pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.090590 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.706165 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.735768 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:34:12 crc kubenswrapper[4848]: I0128 13:34:12.953810 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Jan 28 13:34:13 crc kubenswrapper[4848]: I0128 13:34:13.006514 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"b4edacab-a671-4ace-8bb5-bd113d2c666b","Type":"ContainerStarted","Data":"4f138bec2235ddc9d27811b41173d174eaca43a454521f4490adad73c057814c"} Jan 28 13:34:13 crc kubenswrapper[4848]: W0128 13:34:13.076474 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd004b545_6c1d_42f8_93cb_be2549026492.slice/crio-22560d1b3ba27b62eac2711409e541ae57b83959ed73bb5e834a8456dc454bc5 WatchSource:0}: Error finding container 22560d1b3ba27b62eac2711409e541ae57b83959ed73bb5e834a8456dc454bc5: Status 404 returned error can't find the container with id 22560d1b3ba27b62eac2711409e541ae57b83959ed73bb5e834a8456dc454bc5 Jan 28 13:34:13 crc kubenswrapper[4848]: I0128 13:34:13.120781 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Jan 28 13:34:13 crc kubenswrapper[4848]: W0128 13:34:13.123918 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf434c780_9c6b_4fa2_b5a2_0220b134bb73.slice/crio-fdc1ca58b2d34a85157d8401e8c1e3702f9c6d75845db7aa85035f1b1f007f62 WatchSource:0}: Error finding container fdc1ca58b2d34a85157d8401e8c1e3702f9c6d75845db7aa85035f1b1f007f62: Status 404 returned error can't find the container with id fdc1ca58b2d34a85157d8401e8c1e3702f9c6d75845db7aa85035f1b1f007f62 Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.025809 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"d004b545-6c1d-42f8-93cb-be2549026492","Type":"ContainerStarted","Data":"7b27ad4862c59a923da336606e291d59dbcd4783380afceddd0155001ce13c5c"} Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.026624 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"d004b545-6c1d-42f8-93cb-be2549026492","Type":"ContainerStarted","Data":"22560d1b3ba27b62eac2711409e541ae57b83959ed73bb5e834a8456dc454bc5"} Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.029789 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"b4edacab-a671-4ace-8bb5-bd113d2c666b","Type":"ContainerStarted","Data":"96478d41535b33f6acb2200c1dbb6ac2b5e7207cd21bc2e11d76b4611c762dcd"} Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.029921 4848 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"b4edacab-a671-4ace-8bb5-bd113d2c666b","Type":"ContainerStarted","Data":"8872649b262b79a5dd1c0d4af63abfa5169fa33bb8f024a0de2e1c025cbb3dbd"} Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.034125 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"f434c780-9c6b-4fa2-b5a2-0220b134bb73","Type":"ContainerStarted","Data":"96b1e63fa417b7aea4a5d146cbc91dc604f5311423aaec4458a1117655d00e26"} Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.034177 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"f434c780-9c6b-4fa2-b5a2-0220b134bb73","Type":"ContainerStarted","Data":"fdc1ca58b2d34a85157d8401e8c1e3702f9c6d75845db7aa85035f1b1f007f62"} Jan 28 13:34:14 crc kubenswrapper[4848]: I0128 13:34:14.074948 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.687505929 podStartE2EDuration="3.074916542s" podCreationTimestamp="2026-01-28 13:34:11 +0000 UTC" firstStartedPulling="2026-01-28 13:34:12.735460892 +0000 UTC m=+2879.647677930" lastFinishedPulling="2026-01-28 13:34:13.122871515 +0000 UTC m=+2880.035088543" observedRunningTime="2026-01-28 13:34:14.061795007 +0000 UTC m=+2880.974012045" watchObservedRunningTime="2026-01-28 13:34:14.074916542 +0000 UTC m=+2880.987133580" Jan 28 13:34:15 crc kubenswrapper[4848]: I0128 13:34:15.063662 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"f434c780-9c6b-4fa2-b5a2-0220b134bb73","Type":"ContainerStarted","Data":"98add663532d1da11092bec0d351f467f4cdf89c4f614e91a221b15a4bf6b59c"} Jan 28 13:34:15 crc kubenswrapper[4848]: I0128 13:34:15.069391 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"d004b545-6c1d-42f8-93cb-be2549026492","Type":"ContainerStarted","Data":"03e702faf8849957e7251ec087e0d2327ad1ea146b69bc89c50ccb728ca5dd8d"} Jan 28 13:34:15 crc kubenswrapper[4848]: I0128 13:34:15.096063 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-0" podStartSLOduration=3.7986574170000003 podStartE2EDuration="4.096015196s" podCreationTimestamp="2026-01-28 13:34:11 +0000 UTC" firstStartedPulling="2026-01-28 13:34:13.126482152 +0000 UTC m=+2880.038699190" lastFinishedPulling="2026-01-28 13:34:13.423839921 +0000 UTC m=+2880.336056969" observedRunningTime="2026-01-28 13:34:15.091641498 +0000 UTC m=+2882.003858546" watchObservedRunningTime="2026-01-28 13:34:15.096015196 +0000 UTC m=+2882.008232234" Jan 28 13:34:15 crc kubenswrapper[4848]: I0128 13:34:15.138996 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-2-0" podStartSLOduration=3.878750933 podStartE2EDuration="4.138970107s" podCreationTimestamp="2026-01-28 13:34:11 +0000 UTC" firstStartedPulling="2026-01-28 13:34:13.119051412 +0000 UTC m=+2880.031268450" lastFinishedPulling="2026-01-28 13:34:13.379270586 +0000 UTC m=+2880.291487624" observedRunningTime="2026-01-28 13:34:15.129039969 +0000 UTC m=+2882.041257007" watchObservedRunningTime="2026-01-28 13:34:15.138970107 +0000 UTC m=+2882.051187135" Jan 28 13:34:16 crc kubenswrapper[4848]: I0128 13:34:16.905022 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Jan 28 13:34:16 crc kubenswrapper[4848]: I0128 13:34:16.985439 4848 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:17 crc kubenswrapper[4848]: I0128 13:34:17.091904 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:22 crc kubenswrapper[4848]: I0128 13:34:22.174733 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-0" Jan 28 13:34:22 crc kubenswrapper[4848]: I0128 13:34:22.196429 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Jan 28 13:34:22 crc kubenswrapper[4848]: I0128 13:34:22.316112 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-2-0" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.209964 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-95dw4"] Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.212717 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.259955 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-95dw4"] Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.329387 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-catalog-content\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.329439 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl9mq\" (UniqueName: \"kubernetes.io/projected/7a77b221-52fc-4ea6-8e26-8e6355511dc2-kube-api-access-xl9mq\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.329583 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-utilities\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.432297 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-catalog-content\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.432366 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl9mq\" (UniqueName: \"kubernetes.io/projected/7a77b221-52fc-4ea6-8e26-8e6355511dc2-kube-api-access-xl9mq\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.432536 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-utilities\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.433142 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-utilities\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.433676 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-catalog-content\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.471576 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl9mq\" (UniqueName: \"kubernetes.io/projected/7a77b221-52fc-4ea6-8e26-8e6355511dc2-kube-api-access-xl9mq\") pod \"certified-operators-95dw4\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:24 crc kubenswrapper[4848]: I0128 13:34:24.545134 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:25 crc kubenswrapper[4848]: I0128 13:34:25.235555 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-95dw4"] Jan 28 13:34:26 crc kubenswrapper[4848]: I0128 13:34:26.257818 4848 generic.go:334] "Generic (PLEG): container finished" podID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerID="3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37" exitCode=0 Jan 28 13:34:26 crc kubenswrapper[4848]: I0128 13:34:26.257931 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerDied","Data":"3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37"} Jan 28 13:34:26 crc kubenswrapper[4848]: I0128 13:34:26.258502 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerStarted","Data":"8c69bff96d507388d3bef4084c33d211aa03265ed366426f12dd5f7f15a44946"} Jan 28 13:34:27 crc kubenswrapper[4848]: I0128 13:34:27.272725 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerStarted","Data":"b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236"} Jan 28 13:34:29 crc kubenswrapper[4848]: I0128 13:34:29.298458 4848 generic.go:334] "Generic (PLEG): container finished" podID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerID="b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236" exitCode=0 Jan 28 13:34:29 crc kubenswrapper[4848]: I0128 13:34:29.298551 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerDied","Data":"b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236"} Jan 
28 13:34:30 crc kubenswrapper[4848]: I0128 13:34:30.314474 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerStarted","Data":"76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f"} Jan 28 13:34:30 crc kubenswrapper[4848]: I0128 13:34:30.356826 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-95dw4" podStartSLOduration=2.852488404 podStartE2EDuration="6.356797895s" podCreationTimestamp="2026-01-28 13:34:24 +0000 UTC" firstStartedPulling="2026-01-28 13:34:26.262988597 +0000 UTC m=+2893.175205645" lastFinishedPulling="2026-01-28 13:34:29.767298098 +0000 UTC m=+2896.679515136" observedRunningTime="2026-01-28 13:34:30.344262266 +0000 UTC m=+2897.256479314" watchObservedRunningTime="2026-01-28 13:34:30.356797895 +0000 UTC m=+2897.269014923" Jan 28 13:34:34 crc kubenswrapper[4848]: I0128 13:34:34.546298 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:34 crc kubenswrapper[4848]: I0128 13:34:34.548090 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:35 crc kubenswrapper[4848]: I0128 13:34:35.600388 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-95dw4" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="registry-server" probeResult="failure" output=< Jan 28 13:34:35 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 13:34:35 crc kubenswrapper[4848]: > Jan 28 13:34:37 crc kubenswrapper[4848]: I0128 13:34:37.924419 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:34:37 crc kubenswrapper[4848]: I0128 13:34:37.924945 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:34:44 crc kubenswrapper[4848]: I0128 13:34:44.610502 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:44 crc kubenswrapper[4848]: I0128 13:34:44.690535 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:44 crc kubenswrapper[4848]: I0128 13:34:44.873618 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-95dw4"] Jan 28 13:34:46 crc kubenswrapper[4848]: I0128 13:34:46.521853 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-95dw4" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="registry-server" containerID="cri-o://76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f" gracePeriod=2 Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.120448 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.237007 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xl9mq\" (UniqueName: \"kubernetes.io/projected/7a77b221-52fc-4ea6-8e26-8e6355511dc2-kube-api-access-xl9mq\") pod \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.237072 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-utilities\") pod \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.237400 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-catalog-content\") pod \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\" (UID: \"7a77b221-52fc-4ea6-8e26-8e6355511dc2\") " Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.238357 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-utilities" (OuterVolumeSpecName: "utilities") pod "7a77b221-52fc-4ea6-8e26-8e6355511dc2" (UID: "7a77b221-52fc-4ea6-8e26-8e6355511dc2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.251300 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a77b221-52fc-4ea6-8e26-8e6355511dc2-kube-api-access-xl9mq" (OuterVolumeSpecName: "kube-api-access-xl9mq") pod "7a77b221-52fc-4ea6-8e26-8e6355511dc2" (UID: "7a77b221-52fc-4ea6-8e26-8e6355511dc2"). InnerVolumeSpecName "kube-api-access-xl9mq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.287202 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7a77b221-52fc-4ea6-8e26-8e6355511dc2" (UID: "7a77b221-52fc-4ea6-8e26-8e6355511dc2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.341174 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.341282 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xl9mq\" (UniqueName: \"kubernetes.io/projected/7a77b221-52fc-4ea6-8e26-8e6355511dc2-kube-api-access-xl9mq\") on node \"crc\" DevicePath \"\"" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.341300 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a77b221-52fc-4ea6-8e26-8e6355511dc2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.538622 4848 generic.go:334] "Generic (PLEG): container finished" podID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerID="76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f" exitCode=0 Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.538670 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerDied","Data":"76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f"} Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.538708 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-95dw4" event={"ID":"7a77b221-52fc-4ea6-8e26-8e6355511dc2","Type":"ContainerDied","Data":"8c69bff96d507388d3bef4084c33d211aa03265ed366426f12dd5f7f15a44946"} Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.538730 4848 scope.go:117] "RemoveContainer" containerID="76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.538739 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-95dw4" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.584736 4848 scope.go:117] "RemoveContainer" containerID="b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.599301 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-95dw4"] Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.637819 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-95dw4"] Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.638027 4848 scope.go:117] "RemoveContainer" containerID="3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.692187 4848 scope.go:117] "RemoveContainer" containerID="76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f" Jan 28 13:34:47 crc kubenswrapper[4848]: E0128 13:34:47.692569 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f\": container with ID starting with 76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f not found: ID does not exist" containerID="76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.692612 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f"} err="failed to get container status \"76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f\": rpc error: code = NotFound desc = could not find container \"76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f\": container with ID starting with 76fd61aafeca0de20ef33ba679aa224b4154fbb92922d545eba4ab8e75c43a1f not found: ID does not exist" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.692647 4848 scope.go:117] "RemoveContainer" containerID="b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236" Jan 28 13:34:47 crc kubenswrapper[4848]: E0128 13:34:47.692945 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236\": container with ID starting with b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236 not found: ID does not exist" containerID="b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.692975 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236"} err="failed to get container status \"b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236\": rpc error: code = NotFound desc = could not find container \"b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236\": container with ID starting with b2ade04ccf2380e968462bb61f5cf6f9b00adf32472db18c2d7f81893249f236 not found: ID does not exist" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.692996 4848 scope.go:117] "RemoveContainer" containerID="3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37" Jan 28 13:34:47 crc kubenswrapper[4848]: E0128 13:34:47.693327 4848 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37\": container with ID starting with 3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37 not found: ID does not exist" containerID="3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37" Jan 28 13:34:47 crc kubenswrapper[4848]: I0128 13:34:47.693377 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37"} err="failed to get container status \"3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37\": rpc error: code = NotFound desc = could not find container \"3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37\": container with ID starting with 3716ce2b9a3b063daf4f32ec0213049fd68aee4e29c1c46b663927bc05db1a37 not found: ID does not exist" Jan 28 13:34:48 crc kubenswrapper[4848]: I0128 13:34:48.876609 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" path="/var/lib/kubelet/pods/7a77b221-52fc-4ea6-8e26-8e6355511dc2/volumes" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.704141 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-742d7"] Jan 28 13:34:50 crc kubenswrapper[4848]: E0128 13:34:50.705178 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="extract-content" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.705201 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="extract-content" Jan 28 13:34:50 crc kubenswrapper[4848]: E0128 13:34:50.705240 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="registry-server" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.705289 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="registry-server" Jan 28 13:34:50 crc kubenswrapper[4848]: E0128 13:34:50.705306 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="extract-utilities" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.705317 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="extract-utilities" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.705611 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a77b221-52fc-4ea6-8e26-8e6355511dc2" containerName="registry-server" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.707559 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.735121 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-742d7"] Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.838682 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-utilities\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.838778 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-catalog-content\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.839483 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r94dp\" (UniqueName: \"kubernetes.io/projected/db668477-f88a-44b5-8be1-b23eb2a2831c-kube-api-access-r94dp\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.942395 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-utilities\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.942522 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-catalog-content\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.942671 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r94dp\" (UniqueName: \"kubernetes.io/projected/db668477-f88a-44b5-8be1-b23eb2a2831c-kube-api-access-r94dp\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.943111 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-catalog-content\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.943125 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-utilities\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:50 crc kubenswrapper[4848]: I0128 13:34:50.978171 4848 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-r94dp\" (UniqueName: \"kubernetes.io/projected/db668477-f88a-44b5-8be1-b23eb2a2831c-kube-api-access-r94dp\") pod \"redhat-marketplace-742d7\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:51 crc kubenswrapper[4848]: I0128 13:34:51.042434 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:34:51 crc kubenswrapper[4848]: I0128 13:34:51.625870 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-742d7"] Jan 28 13:34:52 crc kubenswrapper[4848]: I0128 13:34:52.605018 4848 generic.go:334] "Generic (PLEG): container finished" podID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerID="9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff" exitCode=0 Jan 28 13:34:52 crc kubenswrapper[4848]: I0128 13:34:52.605088 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerDied","Data":"9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff"} Jan 28 13:34:52 crc kubenswrapper[4848]: I0128 13:34:52.605496 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerStarted","Data":"79b884fd3af5cd09bae1c8c8b4c17ba846152ad5285649b4a74d62787c363e37"} Jan 28 13:34:54 crc kubenswrapper[4848]: I0128 13:34:54.633596 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerStarted","Data":"ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254"} Jan 28 13:34:55 crc kubenswrapper[4848]: I0128 13:34:55.646889 4848 generic.go:334] "Generic (PLEG): container finished" podID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerID="ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254" exitCode=0 Jan 28 13:34:55 crc kubenswrapper[4848]: I0128 13:34:55.646971 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerDied","Data":"ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254"} Jan 28 13:34:56 crc kubenswrapper[4848]: I0128 13:34:56.662352 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerStarted","Data":"3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1"} Jan 28 13:34:56 crc kubenswrapper[4848]: I0128 13:34:56.685561 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-742d7" podStartSLOduration=3.251716474 podStartE2EDuration="6.68549732s" podCreationTimestamp="2026-01-28 13:34:50 +0000 UTC" firstStartedPulling="2026-01-28 13:34:52.607694064 +0000 UTC m=+2919.519911102" lastFinishedPulling="2026-01-28 13:34:56.0414749 +0000 UTC m=+2922.953691948" observedRunningTime="2026-01-28 13:34:56.683492407 +0000 UTC m=+2923.595709495" watchObservedRunningTime="2026-01-28 13:34:56.68549732 +0000 UTC m=+2923.597714408" Jan 28 13:35:01 crc kubenswrapper[4848]: I0128 13:35:01.042656 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:35:01 crc kubenswrapper[4848]: I0128 13:35:01.044756 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:35:01 crc kubenswrapper[4848]: I0128 13:35:01.125367 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:35:01 crc kubenswrapper[4848]: I0128 13:35:01.770313 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:35:01 crc kubenswrapper[4848]: I0128 13:35:01.835157 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-742d7"] Jan 28 13:35:03 crc kubenswrapper[4848]: I0128 13:35:03.737876 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-742d7" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="registry-server" containerID="cri-o://3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1" gracePeriod=2 Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.278847 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.316055 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-utilities\") pod \"db668477-f88a-44b5-8be1-b23eb2a2831c\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.316196 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-catalog-content\") pod \"db668477-f88a-44b5-8be1-b23eb2a2831c\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.316476 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r94dp\" (UniqueName: \"kubernetes.io/projected/db668477-f88a-44b5-8be1-b23eb2a2831c-kube-api-access-r94dp\") pod \"db668477-f88a-44b5-8be1-b23eb2a2831c\" (UID: \"db668477-f88a-44b5-8be1-b23eb2a2831c\") " Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.319434 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-utilities" (OuterVolumeSpecName: "utilities") pod "db668477-f88a-44b5-8be1-b23eb2a2831c" (UID: "db668477-f88a-44b5-8be1-b23eb2a2831c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.326760 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db668477-f88a-44b5-8be1-b23eb2a2831c-kube-api-access-r94dp" (OuterVolumeSpecName: "kube-api-access-r94dp") pod "db668477-f88a-44b5-8be1-b23eb2a2831c" (UID: "db668477-f88a-44b5-8be1-b23eb2a2831c"). InnerVolumeSpecName "kube-api-access-r94dp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.348538 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "db668477-f88a-44b5-8be1-b23eb2a2831c" (UID: "db668477-f88a-44b5-8be1-b23eb2a2831c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.418691 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r94dp\" (UniqueName: \"kubernetes.io/projected/db668477-f88a-44b5-8be1-b23eb2a2831c-kube-api-access-r94dp\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.418730 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.418741 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db668477-f88a-44b5-8be1-b23eb2a2831c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.762514 4848 generic.go:334] "Generic (PLEG): container finished" podID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerID="3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1" exitCode=0 Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.762582 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerDied","Data":"3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1"} Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.762627 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-742d7" event={"ID":"db668477-f88a-44b5-8be1-b23eb2a2831c","Type":"ContainerDied","Data":"79b884fd3af5cd09bae1c8c8b4c17ba846152ad5285649b4a74d62787c363e37"} Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.762658 4848 scope.go:117] "RemoveContainer" containerID="3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.762747 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-742d7" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.815647 4848 scope.go:117] "RemoveContainer" containerID="ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.832311 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-742d7"] Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.847878 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-742d7"] Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.848755 4848 scope.go:117] "RemoveContainer" containerID="9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.871815 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" path="/var/lib/kubelet/pods/db668477-f88a-44b5-8be1-b23eb2a2831c/volumes" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.952396 4848 scope.go:117] "RemoveContainer" containerID="3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1" Jan 28 13:35:04 crc kubenswrapper[4848]: E0128 13:35:04.956108 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1\": container with ID starting with 3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1 not found: ID does not exist" containerID="3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.956191 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1"} err="failed to get container status \"3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1\": rpc error: code = NotFound desc = could not find container \"3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1\": container with ID starting with 3f134f1feb3b4de5259a1f7546eca394e2d8a1a848edf87a5046791a742f9bc1 not found: ID does not exist" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.956244 4848 scope.go:117] "RemoveContainer" containerID="ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254" Jan 28 13:35:04 crc kubenswrapper[4848]: E0128 13:35:04.957149 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254\": container with ID starting with ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254 not found: ID does not exist" containerID="ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.957192 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254"} err="failed to get container status \"ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254\": rpc error: code = NotFound desc = could not find container \"ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254\": container with ID starting with ff991ab2b6fd2f885a90a1b911b3a9a0a64fde7080c434b96968a5fe7b4a8254 not found: ID does not exist" Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 
13:35:04.957216 4848 scope.go:117] "RemoveContainer" containerID="9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff"
Jan 28 13:35:04 crc kubenswrapper[4848]: E0128 13:35:04.959033 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff\": container with ID starting with 9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff not found: ID does not exist" containerID="9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff"
Jan 28 13:35:04 crc kubenswrapper[4848]: I0128 13:35:04.959123 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff"} err="failed to get container status \"9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff\": rpc error: code = NotFound desc = could not find container \"9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff\": container with ID starting with 9d59c4d08c7f013a062ca60e34f5e713bca5236c0262fd8bdffc8dba3084cdff not found: ID does not exist"
Jan 28 13:35:07 crc kubenswrapper[4848]: I0128 13:35:07.925388 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:35:07 crc kubenswrapper[4848]: I0128 13:35:07.925870 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:35:07 crc kubenswrapper[4848]: I0128 13:35:07.925947 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz"
Jan 28 13:35:07 crc kubenswrapper[4848]: I0128 13:35:07.927182 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04fa88df5b6c08221521ccc691315473dbd0a3b2d5381aaf58b37daaf6f3cc2f"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 13:35:07 crc kubenswrapper[4848]: I0128 13:35:07.927351 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://04fa88df5b6c08221521ccc691315473dbd0a3b2d5381aaf58b37daaf6f3cc2f" gracePeriod=600
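The recurring liveness failure above is a plain HTTP GET against 127.0.0.1:8798/health; a dial error such as "connection refused" counts as a failure just like a non-2xx/3xx status, and once the failure threshold is reached the kubelet kills the container (gracePeriod=600 here) and restarts it. An approximation of the check itself:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// Approximation of the kubelet HTTP liveness probe failing in the log:
// GET the health endpoint with a short timeout; any transport error or a
// status outside the 2xx/3xx range is a probe failure.
func main() {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		fmt.Printf("Liveness probe failed: %v\n", err) // e.g. connection refused
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		fmt.Printf("Liveness probe failed: HTTP %d\n", resp.StatusCode)
		return
	}
	fmt.Println("healthy")
}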
event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"04fa88df5b6c08221521ccc691315473dbd0a3b2d5381aaf58b37daaf6f3cc2f"} Jan 28 13:35:08 crc kubenswrapper[4848]: I0128 13:35:08.817067 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"} Jan 28 13:35:08 crc kubenswrapper[4848]: I0128 13:35:08.817104 4848 scope.go:117] "RemoveContainer" containerID="4d78299bde05f76348848b8c1a1318197ff16d106729f41915f3b85aad9d885e" Jan 28 13:35:14 crc kubenswrapper[4848]: I0128 13:35:14.952530 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:35:14 crc kubenswrapper[4848]: I0128 13:35:14.953756 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="prometheus" containerID="cri-o://5ebaefa9171c38760ffd414457b36921e7c0c08f6b6cd8116c58d9ebf4f8ec4c" gracePeriod=600 Jan 28 13:35:14 crc kubenswrapper[4848]: I0128 13:35:14.954299 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="thanos-sidecar" containerID="cri-o://da9cc43340008dfc66d1c1f759a26601dcc6f05b2ae126964c3e43fa00ff3e17" gracePeriod=600 Jan 28 13:35:14 crc kubenswrapper[4848]: I0128 13:35:14.954371 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="config-reloader" containerID="cri-o://6cd42bd9a1d13179e079b31f531dcf9df745313d43f018372add67ea22687774" gracePeriod=600 Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906286 4848 generic.go:334] "Generic (PLEG): container finished" podID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerID="da9cc43340008dfc66d1c1f759a26601dcc6f05b2ae126964c3e43fa00ff3e17" exitCode=0 Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906777 4848 generic.go:334] "Generic (PLEG): container finished" podID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerID="6cd42bd9a1d13179e079b31f531dcf9df745313d43f018372add67ea22687774" exitCode=0 Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906793 4848 generic.go:334] "Generic (PLEG): container finished" podID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerID="5ebaefa9171c38760ffd414457b36921e7c0c08f6b6cd8116c58d9ebf4f8ec4c" exitCode=0 Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906368 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerDied","Data":"da9cc43340008dfc66d1c1f759a26601dcc6f05b2ae126964c3e43fa00ff3e17"} Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906844 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerDied","Data":"6cd42bd9a1d13179e079b31f531dcf9df745313d43f018372add67ea22687774"} Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906861 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerDied","Data":"5ebaefa9171c38760ffd414457b36921e7c0c08f6b6cd8116c58d9ebf4f8ec4c"} Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906872 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"0798460c-39c3-4539-8b8a-89a551b4bafc","Type":"ContainerDied","Data":"d9b9513a93187d2dcb85a47e371f990b79c3002a45916394d2ca19b606b70414"} Jan 28 13:35:15 crc kubenswrapper[4848]: I0128 13:35:15.906883 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9b9513a93187d2dcb85a47e371f990b79c3002a45916394d2ca19b606b70414" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.001544 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.053503 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-thanos-prometheus-http-client-file\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.053864 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.053932 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-0\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054093 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/0798460c-39c3-4539-8b8a-89a551b4bafc-config-out\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054160 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-secret-combined-ca-bundle\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054226 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054278 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054332 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-config\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054359 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-2\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054454 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054498 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-tls-assets\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054527 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsl22\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-kube-api-access-bsl22\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.054556 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-1\") pod \"0798460c-39c3-4539-8b8a-89a551b4bafc\" (UID: \"0798460c-39c3-4539-8b8a-89a551b4bafc\") " Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.055949 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.061046 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.067770 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0798460c-39c3-4539-8b8a-89a551b4bafc-config-out" (OuterVolumeSpecName: "config-out") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.069819 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.074130 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.087497 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.088475 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.089651 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.101400 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "secret-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.104355 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-config" (OuterVolumeSpecName: "config") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.142630 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.143552 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-kube-api-access-bsl22" (OuterVolumeSpecName: "kube-api-access-bsl22") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "kube-api-access-bsl22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.157902 4848 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/0798460c-39c3-4539-8b8a-89a551b4bafc-config-out\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.157956 4848 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.157973 4848 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.157990 4848 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158035 4848 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158050 4848 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-config\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158062 4848 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 28 13:35:16 crc 
kubenswrapper[4848]: I0128 13:35:16.158074 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsl22\" (UniqueName: \"kubernetes.io/projected/0798460c-39c3-4539-8b8a-89a551b4bafc-kube-api-access-bsl22\") on node \"crc\" DevicePath \"\""
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158086 4848 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158099 4848 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158160 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") on node \"crc\" "
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.158178 4848 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/0798460c-39c3-4539-8b8a-89a551b4bafc-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.216175 4848 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.216440 4848 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83") on node "crc"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.272753 4848 reconciler_common.go:293] "Volume detached for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") on node \"crc\" DevicePath \"\""
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.361468 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config" (OuterVolumeSpecName: "web-config") pod "0798460c-39c3-4539-8b8a-89a551b4bafc" (UID: "0798460c-39c3-4539-8b8a-89a551b4bafc"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.377275 4848 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/0798460c-39c3-4539-8b8a-89a551b4bafc-web-config\") on node \"crc\" DevicePath \"\""
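The csi_attacher line above records the device-unstage decision: kubevirt.io.hostpath-provisioner does not advertise the STAGE_UNSTAGE_VOLUME node capability, so UnmountDevice is a no-op that is nevertheless reported as succeeded. A sketch of that branch, illustrative rather than the in-tree implementation, with nodeUnstage standing in for the NodeUnstageVolume RPC:

package main

import "fmt"

// unmountDevice skips the NodeUnstageVolume RPC entirely when the driver
// lacks STAGE_UNSTAGE_VOLUME, yet still reports success, which is what the
// "Skipping UnmountDevice..." entry records.
func unmountDevice(hasStageUnstage bool, nodeUnstage func() error) error {
	if !hasStageUnstage {
		fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...")
		return nil
	}
	return nodeUnstage()
}

func main() {
	_ = unmountDevice(false, nil) // the hostpath-provisioner case from the log
}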
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.946559 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.957412 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.980809 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981275 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="extract-content" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981297 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="extract-content" Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981314 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="registry-server" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981321 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="registry-server" Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981332 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="thanos-sidecar" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981340 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="thanos-sidecar" Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981354 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="prometheus" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981360 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="prometheus" Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981381 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="config-reloader" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981386 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="config-reloader" Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981394 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="init-config-reloader" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981400 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="init-config-reloader" Jan 28 13:35:16 crc kubenswrapper[4848]: E0128 13:35:16.981411 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="extract-utilities" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981419 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="extract-utilities" Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981606 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="config-reloader" Jan 28 13:35:16 
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981631 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="prometheus"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981641 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" containerName="thanos-sidecar"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.981650 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="db668477-f88a-44b5-8be1-b23eb2a2831c" containerName="registry-server"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.984294 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.987187 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.987589 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.988753 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.991390 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.991754 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.991777 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-sll79"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.991787 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 28 13:35:16 crc kubenswrapper[4848]: I0128 13:35:16.999157 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.021890 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.095834 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.095945 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.095979 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096047 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096112 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096164 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096191 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096224 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096267 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096294 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qtwz\" (UniqueName: \"kubernetes.io/projected/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-kube-api-access-5qtwz\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096353 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: 
\"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096380 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-config\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.096412 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199407 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199466 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199501 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199521 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199543 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qtwz\" (UniqueName: \"kubernetes.io/projected/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-kube-api-access-5qtwz\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199592 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 
13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199609 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-config\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199637 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199732 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199754 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199775 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199827 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.199883 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.200664 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.200744 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" 
(UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.201069 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.204067 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-config\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.205865 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.206712 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.207333 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.207405 4848 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.207873 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fbb6ff8d2ceb994243fa9499d5bbb9ac1ad8a88e4c49c99f41a1170dfb512188/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.207949 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.219797 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.224476 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.226141 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qtwz\" (UniqueName: \"kubernetes.io/projected/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-kube-api-access-5qtwz\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.229902 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7bb0f426-1fdb-427a-ad1e-dc5387a1ba01-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.282335 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8b6d72c-fbb6-4d06-bdc9-afce7b087c83\") pod \"prometheus-metric-storage-0\" (UID: \"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01\") " pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.308939 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.838969 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 13:35:17 crc kubenswrapper[4848]: I0128 13:35:17.929785 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01","Type":"ContainerStarted","Data":"7b3b59424656196330e7245a4799a0e3da12ba8ea1e3fa99506346076621f4e0"} Jan 28 13:35:18 crc kubenswrapper[4848]: I0128 13:35:18.864041 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0798460c-39c3-4539-8b8a-89a551b4bafc" path="/var/lib/kubelet/pods/0798460c-39c3-4539-8b8a-89a551b4bafc/volumes" Jan 28 13:35:23 crc kubenswrapper[4848]: I0128 13:35:23.085171 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01","Type":"ContainerStarted","Data":"05b417f3e7200a95d4e971203fa6d642e135b45489c4507e468d51d9014f7f03"} Jan 28 13:35:33 crc kubenswrapper[4848]: I0128 13:35:33.211081 4848 generic.go:334] "Generic (PLEG): container finished" podID="7bb0f426-1fdb-427a-ad1e-dc5387a1ba01" containerID="05b417f3e7200a95d4e971203fa6d642e135b45489c4507e468d51d9014f7f03" exitCode=0 Jan 28 13:35:33 crc kubenswrapper[4848]: I0128 13:35:33.211222 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01","Type":"ContainerDied","Data":"05b417f3e7200a95d4e971203fa6d642e135b45489c4507e468d51d9014f7f03"} Jan 28 13:35:34 crc kubenswrapper[4848]: I0128 13:35:34.227823 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01","Type":"ContainerStarted","Data":"1e902cb5b515dd04a63f445f72d0d73ec2df92c68af3c967837e2852b25bd418"} Jan 28 13:35:39 crc kubenswrapper[4848]: I0128 13:35:39.299045 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01","Type":"ContainerStarted","Data":"61d03774652a999ea912cc3b2ebfada18df95155b7f5f0d41538f9e3922e0198"} Jan 28 13:35:39 crc kubenswrapper[4848]: I0128 13:35:39.300089 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7bb0f426-1fdb-427a-ad1e-dc5387a1ba01","Type":"ContainerStarted","Data":"101a5664936eae813ca3783ca3d628eafe68ebce2cc908f1e03a8756fed40928"} Jan 28 13:35:39 crc kubenswrapper[4848]: I0128 13:35:39.361130 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=23.361099009 podStartE2EDuration="23.361099009s" podCreationTimestamp="2026-01-28 13:35:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:35:39.346341661 +0000 UTC m=+2966.258558709" watchObservedRunningTime="2026-01-28 13:35:39.361099009 +0000 UTC m=+2966.273316047" Jan 28 13:35:42 crc kubenswrapper[4848]: I0128 13:35:42.309748 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:47 crc kubenswrapper[4848]: I0128 13:35:47.309270 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 
28 13:35:47 crc kubenswrapper[4848]: I0128 13:35:47.314900 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 13:35:47 crc kubenswrapper[4848]: I0128 13:35:47.403767 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 13:36:05 crc kubenswrapper[4848]: I0128 13:36:05.872039 4848 scope.go:117] "RemoveContainer" containerID="da9cc43340008dfc66d1c1f759a26601dcc6f05b2ae126964c3e43fa00ff3e17" Jan 28 13:36:05 crc kubenswrapper[4848]: I0128 13:36:05.901200 4848 scope.go:117] "RemoveContainer" containerID="e8b6318e2bb12a46630e1293e6b7168a2e5085fed5a358ea3adb1c24072bf940" Jan 28 13:36:05 crc kubenswrapper[4848]: I0128 13:36:05.962672 4848 scope.go:117] "RemoveContainer" containerID="5ebaefa9171c38760ffd414457b36921e7c0c08f6b6cd8116c58d9ebf4f8ec4c" Jan 28 13:36:05 crc kubenswrapper[4848]: I0128 13:36:05.998021 4848 scope.go:117] "RemoveContainer" containerID="6cd42bd9a1d13179e079b31f531dcf9df745313d43f018372add67ea22687774" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.215474 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.218881 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.222031 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.222741 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.222753 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.225785 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-4tj7f" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.231702 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316486 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-config-data\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316555 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316600 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316631 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316750 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316791 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6p7t\" (UniqueName: \"kubernetes.io/projected/08f6c3e6-eb26-471d-947f-11cb5533c6c8-kube-api-access-z6p7t\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316893 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316917 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.316956 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.418995 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6p7t\" (UniqueName: \"kubernetes.io/projected/08f6c3e6-eb26-471d-947f-11cb5533c6c8-kube-api-access-z6p7t\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419197 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419229 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419288 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419321 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-config-data\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419354 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419424 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419454 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.419495 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.420696 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.420847 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.421779 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.421915 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: 
\"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.422082 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-config-data\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.430605 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.432370 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.436855 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.440748 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6p7t\" (UniqueName: \"kubernetes.io/projected/08f6c3e6-eb26-471d-947f-11cb5533c6c8-kube-api-access-z6p7t\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.464601 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.555053 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 28 13:36:12 crc kubenswrapper[4848]: I0128 13:36:12.896489 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 28 13:36:13 crc kubenswrapper[4848]: I0128 13:36:13.735317 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"08f6c3e6-eb26-471d-947f-11cb5533c6c8","Type":"ContainerStarted","Data":"40a936dbf61fdecb41626cfe31499aa7382f09aea2977b32056f8117792369c2"} Jan 28 13:36:24 crc kubenswrapper[4848]: I0128 13:36:24.720552 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 28 13:36:25 crc kubenswrapper[4848]: I0128 13:36:25.896974 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"08f6c3e6-eb26-471d-947f-11cb5533c6c8","Type":"ContainerStarted","Data":"905eac889a53bd9fbe0fcf3d280b37b631b45b4f997b876dd49d9a4db45cd054"} Jan 28 13:36:25 crc kubenswrapper[4848]: I0128 13:36:25.926647 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.1079991 podStartE2EDuration="14.926614566s" podCreationTimestamp="2026-01-28 13:36:11 +0000 UTC" firstStartedPulling="2026-01-28 13:36:12.898007499 +0000 UTC m=+2999.810224557" lastFinishedPulling="2026-01-28 13:36:24.716622985 +0000 UTC m=+3011.628840023" observedRunningTime="2026-01-28 13:36:25.920187832 +0000 UTC m=+3012.832404900" watchObservedRunningTime="2026-01-28 13:36:25.926614566 +0000 UTC m=+3012.838831604" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.202736 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kvppl"] Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.206560 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.225842 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kvppl"] Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.327324 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnfmp\" (UniqueName: \"kubernetes.io/projected/2f26e80e-094d-4cc9-a375-79f7389308a5-kube-api-access-lnfmp\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.327434 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-utilities\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.330799 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-catalog-content\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.433337 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnfmp\" (UniqueName: \"kubernetes.io/projected/2f26e80e-094d-4cc9-a375-79f7389308a5-kube-api-access-lnfmp\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.433432 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-utilities\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.433563 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-catalog-content\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.434117 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-catalog-content\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.434263 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-utilities\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.463695 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lnfmp\" (UniqueName: \"kubernetes.io/projected/2f26e80e-094d-4cc9-a375-79f7389308a5-kube-api-access-lnfmp\") pod \"community-operators-kvppl\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:36 crc kubenswrapper[4848]: I0128 13:37:36.536957 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:37 crc kubenswrapper[4848]: I0128 13:37:37.088464 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kvppl"] Jan 28 13:37:37 crc kubenswrapper[4848]: W0128 13:37:37.089729 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f26e80e_094d_4cc9_a375_79f7389308a5.slice/crio-4cd6a66c432ccbeb36e643c1a8ab5064d4d357887b9e2bc12be6ad061a678875 WatchSource:0}: Error finding container 4cd6a66c432ccbeb36e643c1a8ab5064d4d357887b9e2bc12be6ad061a678875: Status 404 returned error can't find the container with id 4cd6a66c432ccbeb36e643c1a8ab5064d4d357887b9e2bc12be6ad061a678875 Jan 28 13:37:37 crc kubenswrapper[4848]: I0128 13:37:37.849913 4848 generic.go:334] "Generic (PLEG): container finished" podID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerID="0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3" exitCode=0 Jan 28 13:37:37 crc kubenswrapper[4848]: I0128 13:37:37.850078 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerDied","Data":"0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3"} Jan 28 13:37:37 crc kubenswrapper[4848]: I0128 13:37:37.851400 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerStarted","Data":"4cd6a66c432ccbeb36e643c1a8ab5064d4d357887b9e2bc12be6ad061a678875"} Jan 28 13:37:37 crc kubenswrapper[4848]: I0128 13:37:37.924421 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:37:37 crc kubenswrapper[4848]: I0128 13:37:37.924503 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:37:38 crc kubenswrapper[4848]: I0128 13:37:38.868122 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerStarted","Data":"59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524"} Jan 28 13:37:40 crc kubenswrapper[4848]: I0128 13:37:40.897278 4848 generic.go:334] "Generic (PLEG): container finished" podID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerID="59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524" exitCode=0 Jan 28 13:37:40 crc kubenswrapper[4848]: I0128 13:37:40.897408 4848 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerDied","Data":"59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524"} Jan 28 13:37:42 crc kubenswrapper[4848]: I0128 13:37:42.928533 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerStarted","Data":"e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5"} Jan 28 13:37:42 crc kubenswrapper[4848]: I0128 13:37:42.958540 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kvppl" podStartSLOduration=3.186524014 podStartE2EDuration="6.958502514s" podCreationTimestamp="2026-01-28 13:37:36 +0000 UTC" firstStartedPulling="2026-01-28 13:37:37.85247022 +0000 UTC m=+3084.764687268" lastFinishedPulling="2026-01-28 13:37:41.6244487 +0000 UTC m=+3088.536665768" observedRunningTime="2026-01-28 13:37:42.952074041 +0000 UTC m=+3089.864291119" watchObservedRunningTime="2026-01-28 13:37:42.958502514 +0000 UTC m=+3089.870719572" Jan 28 13:37:46 crc kubenswrapper[4848]: I0128 13:37:46.538624 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:46 crc kubenswrapper[4848]: I0128 13:37:46.539507 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:46 crc kubenswrapper[4848]: I0128 13:37:46.597748 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:47 crc kubenswrapper[4848]: I0128 13:37:47.022364 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:49 crc kubenswrapper[4848]: I0128 13:37:49.996499 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kvppl"] Jan 28 13:37:49 crc kubenswrapper[4848]: I0128 13:37:49.997204 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kvppl" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="registry-server" containerID="cri-o://e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5" gracePeriod=2 Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.471278 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.538344 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-utilities\") pod \"2f26e80e-094d-4cc9-a375-79f7389308a5\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.538573 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-catalog-content\") pod \"2f26e80e-094d-4cc9-a375-79f7389308a5\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.538629 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnfmp\" (UniqueName: \"kubernetes.io/projected/2f26e80e-094d-4cc9-a375-79f7389308a5-kube-api-access-lnfmp\") pod \"2f26e80e-094d-4cc9-a375-79f7389308a5\" (UID: \"2f26e80e-094d-4cc9-a375-79f7389308a5\") " Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.540105 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-utilities" (OuterVolumeSpecName: "utilities") pod "2f26e80e-094d-4cc9-a375-79f7389308a5" (UID: "2f26e80e-094d-4cc9-a375-79f7389308a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.549473 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f26e80e-094d-4cc9-a375-79f7389308a5-kube-api-access-lnfmp" (OuterVolumeSpecName: "kube-api-access-lnfmp") pod "2f26e80e-094d-4cc9-a375-79f7389308a5" (UID: "2f26e80e-094d-4cc9-a375-79f7389308a5"). InnerVolumeSpecName "kube-api-access-lnfmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.609482 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f26e80e-094d-4cc9-a375-79f7389308a5" (UID: "2f26e80e-094d-4cc9-a375-79f7389308a5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.642443 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.642492 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnfmp\" (UniqueName: \"kubernetes.io/projected/2f26e80e-094d-4cc9-a375-79f7389308a5-kube-api-access-lnfmp\") on node \"crc\" DevicePath \"\"" Jan 28 13:37:50 crc kubenswrapper[4848]: I0128 13:37:50.642508 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f26e80e-094d-4cc9-a375-79f7389308a5-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.016388 4848 generic.go:334] "Generic (PLEG): container finished" podID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerID="e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5" exitCode=0 Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.016426 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerDied","Data":"e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5"} Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.016475 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kvppl" Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.016506 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvppl" event={"ID":"2f26e80e-094d-4cc9-a375-79f7389308a5","Type":"ContainerDied","Data":"4cd6a66c432ccbeb36e643c1a8ab5064d4d357887b9e2bc12be6ad061a678875"} Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.016530 4848 scope.go:117] "RemoveContainer" containerID="e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5" Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.052158 4848 scope.go:117] "RemoveContainer" containerID="59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524" Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.053161 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kvppl"] Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.065968 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kvppl"] Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.086165 4848 scope.go:117] "RemoveContainer" containerID="0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3" Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.133643 4848 scope.go:117] "RemoveContainer" containerID="e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5" Jan 28 13:37:51 crc kubenswrapper[4848]: E0128 13:37:51.134101 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5\": container with ID starting with e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5 not found: ID does not exist" containerID="e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5" Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.134137 
4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5"} err="failed to get container status \"e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5\": rpc error: code = NotFound desc = could not find container \"e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5\": container with ID starting with e32f7726ba2fe1f15221d28dd08302fd284d16d82d1dc898fbcbfd147cab21e5 not found: ID does not exist"
Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.134164 4848 scope.go:117] "RemoveContainer" containerID="59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524"
Jan 28 13:37:51 crc kubenswrapper[4848]: E0128 13:37:51.134607 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524\": container with ID starting with 59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524 not found: ID does not exist" containerID="59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524"
Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.134635 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524"} err="failed to get container status \"59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524\": rpc error: code = NotFound desc = could not find container \"59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524\": container with ID starting with 59cf208ead9ee1cf10849e4c75b733ce945a3a6f6232835d53976359a0a77524 not found: ID does not exist"
Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.134650 4848 scope.go:117] "RemoveContainer" containerID="0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3"
Jan 28 13:37:51 crc kubenswrapper[4848]: E0128 13:37:51.134986 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3\": container with ID starting with 0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3 not found: ID does not exist" containerID="0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3"
Jan 28 13:37:51 crc kubenswrapper[4848]: I0128 13:37:51.135008 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3"} err="failed to get container status \"0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3\": rpc error: code = NotFound desc = could not find container \"0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3\": container with ID starting with 0ef3d793ed7f7384a0eebaacf2738ddfc860c164051e3a5defe5397114086ca3 not found: ID does not exist"
Jan 28 13:37:52 crc kubenswrapper[4848]: I0128 13:37:52.874393 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" path="/var/lib/kubelet/pods/2f26e80e-094d-4cc9-a375-79f7389308a5/volumes"
Jan 28 13:38:07 crc kubenswrapper[4848]: I0128 13:38:07.924932 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:38:07 crc kubenswrapper[4848]: I0128 13:38:07.925870 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:38:37 crc kubenswrapper[4848]: I0128 13:38:37.925144 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 13:38:37 crc kubenswrapper[4848]: I0128 13:38:37.926066 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 13:38:37 crc kubenswrapper[4848]: I0128 13:38:37.926173 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz"
Jan 28 13:38:37 crc kubenswrapper[4848]: I0128 13:38:37.927418 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 13:38:37 crc kubenswrapper[4848]: I0128 13:38:37.927501 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696" gracePeriod=600
Jan 28 13:38:38 crc kubenswrapper[4848]: E0128 13:38:38.056388 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:38:38 crc kubenswrapper[4848]: I0128 13:38:38.594783 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696" exitCode=0
Jan 28 13:38:38 crc kubenswrapper[4848]: I0128 13:38:38.595228 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"}
Jan 28 13:38:38 crc kubenswrapper[4848]: I0128 13:38:38.595293 4848 scope.go:117] "RemoveContainer" containerID="04fa88df5b6c08221521ccc691315473dbd0a3b2d5381aaf58b37daaf6f3cc2f"
Jan 28 13:38:38 crc kubenswrapper[4848]: I0128 13:38:38.597185 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:38:38 crc kubenswrapper[4848]: E0128 13:38:38.597594 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:38:49 crc kubenswrapper[4848]: I0128 13:38:49.850873 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:38:49 crc kubenswrapper[4848]: E0128 13:38:49.852796 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:39:01 crc kubenswrapper[4848]: I0128 13:39:01.850076 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:39:01 crc kubenswrapper[4848]: E0128 13:39:01.851140 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:39:14 crc kubenswrapper[4848]: I0128 13:39:14.861657 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:39:14 crc kubenswrapper[4848]: E0128 13:39:14.863096 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:39:26 crc kubenswrapper[4848]: I0128 13:39:26.852051 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:39:26 crc kubenswrapper[4848]: E0128 13:39:26.853659 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:39:37 crc kubenswrapper[4848]: I0128 13:39:37.850171 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:39:37 crc kubenswrapper[4848]: E0128 13:39:37.851472 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:39:51 crc kubenswrapper[4848]: I0128 13:39:51.850429 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:39:51 crc kubenswrapper[4848]: E0128 13:39:51.851912 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:40:06 crc kubenswrapper[4848]: I0128 13:40:06.851402 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:40:06 crc kubenswrapper[4848]: E0128 13:40:06.852751 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:40:18 crc kubenswrapper[4848]: I0128 13:40:18.851664 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:40:18 crc kubenswrapper[4848]: E0128 13:40:18.853050 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:40:33 crc kubenswrapper[4848]: I0128 13:40:33.850952 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:40:33 crc kubenswrapper[4848]: E0128 13:40:33.851920 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:40:45 crc kubenswrapper[4848]: I0128 13:40:45.850785 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:40:45 crc kubenswrapper[4848]: E0128 13:40:45.851567 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:40:58 crc kubenswrapper[4848]: I0128 13:40:58.850742 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:40:58 crc kubenswrapper[4848]: E0128 13:40:58.851665 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:41:13 crc kubenswrapper[4848]: I0128 13:41:13.850995 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:41:13 crc kubenswrapper[4848]: E0128 13:41:13.852111 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:41:27 crc kubenswrapper[4848]: I0128 13:41:27.851238 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:41:27 crc kubenswrapper[4848]: E0128 13:41:27.853087 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:41:40 crc kubenswrapper[4848]: I0128 13:41:40.850963 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:41:40 crc kubenswrapper[4848]: E0128 13:41:40.852476 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:41:52 crc kubenswrapper[4848]: I0128 13:41:52.850685 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:41:52 crc kubenswrapper[4848]: E0128 13:41:52.851747 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:42:06 crc kubenswrapper[4848]: I0128 13:42:06.851004 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:42:06 crc kubenswrapper[4848]: E0128 13:42:06.852296 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:42:20 crc kubenswrapper[4848]: I0128 13:42:20.852175 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:42:20 crc kubenswrapper[4848]: E0128 13:42:20.853642 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:42:32 crc kubenswrapper[4848]: I0128 13:42:32.850320 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:42:32 crc kubenswrapper[4848]: E0128 13:42:32.851643 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.852386 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jll9s"]
Jan 28 13:42:37 crc kubenswrapper[4848]: E0128 13:42:37.853757 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="extract-content"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.853774 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="extract-content"
Jan 28 13:42:37 crc kubenswrapper[4848]: E0128 13:42:37.853785 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="extract-utilities"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.853792 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="extract-utilities"
Jan 28 13:42:37 crc kubenswrapper[4848]: E0128 13:42:37.853824 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="registry-server"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.853834 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="registry-server"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.854156 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f26e80e-094d-4cc9-a375-79f7389308a5" containerName="registry-server"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.855884 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.868069 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jll9s"]
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.965993 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-catalog-content\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.966854 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvg9q\" (UniqueName: \"kubernetes.io/projected/f5daf273-2fdc-4706-8f43-022d2f4a2770-kube-api-access-cvg9q\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:37 crc kubenswrapper[4848]: I0128 13:42:37.967202 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-utilities\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.073971 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvg9q\" (UniqueName: \"kubernetes.io/projected/f5daf273-2fdc-4706-8f43-022d2f4a2770-kube-api-access-cvg9q\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.074555 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-utilities\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.074614 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-catalog-content\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.075238 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-catalog-content\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.075515 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-utilities\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.111366 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvg9q\" (UniqueName: \"kubernetes.io/projected/f5daf273-2fdc-4706-8f43-022d2f4a2770-kube-api-access-cvg9q\") pod \"redhat-operators-jll9s\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") " pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.192982 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:38 crc kubenswrapper[4848]: I0128 13:42:38.717451 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jll9s"]
Jan 28 13:42:39 crc kubenswrapper[4848]: I0128 13:42:39.496162 4848 generic.go:334] "Generic (PLEG): container finished" podID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerID="7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106" exitCode=0
Jan 28 13:42:39 crc kubenswrapper[4848]: I0128 13:42:39.496294 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerDied","Data":"7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106"}
Jan 28 13:42:39 crc kubenswrapper[4848]: I0128 13:42:39.496741 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerStarted","Data":"f9b7697f572ed2312808a8cf42b26db5b04626f4f0bb21bec3c690e6f26655a5"}
Jan 28 13:42:39 crc kubenswrapper[4848]: I0128 13:42:39.498756 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 13:42:41 crc kubenswrapper[4848]: I0128 13:42:41.522988 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerStarted","Data":"27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3"}
Jan 28 13:42:44 crc kubenswrapper[4848]: I0128 13:42:44.851211 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:42:44 crc kubenswrapper[4848]: E0128 13:42:44.852695 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:42:49 crc kubenswrapper[4848]: I0128 13:42:49.621266 4848 generic.go:334] "Generic (PLEG): container finished" podID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerID="27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3" exitCode=0
Jan 28 13:42:49 crc kubenswrapper[4848]: I0128 13:42:49.621318 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerDied","Data":"27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3"}
Jan 28 13:42:50 crc kubenswrapper[4848]: I0128 13:42:50.641604 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerStarted","Data":"ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536"}
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.193983 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.195058 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.276199 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.314396 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jll9s" podStartSLOduration=10.471179906 podStartE2EDuration="21.314371363s" podCreationTimestamp="2026-01-28 13:42:37 +0000 UTC" firstStartedPulling="2026-01-28 13:42:39.498424094 +0000 UTC m=+3386.410641132" lastFinishedPulling="2026-01-28 13:42:50.341615541 +0000 UTC m=+3397.253832589" observedRunningTime="2026-01-28 13:42:50.685109118 +0000 UTC m=+3397.597326166" watchObservedRunningTime="2026-01-28 13:42:58.314371363 +0000 UTC m=+3405.226588411"
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.820046 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.850480 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:42:58 crc kubenswrapper[4848]: E0128 13:42:58.850768 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:42:58 crc kubenswrapper[4848]: I0128 13:42:58.876605 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jll9s"]
Jan 28 13:43:00 crc kubenswrapper[4848]: I0128 13:43:00.785417 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jll9s" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="registry-server" containerID="cri-o://ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536" gracePeriod=2
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.442151 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.455362 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-utilities\") pod \"f5daf273-2fdc-4706-8f43-022d2f4a2770\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") "
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.455795 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvg9q\" (UniqueName: \"kubernetes.io/projected/f5daf273-2fdc-4706-8f43-022d2f4a2770-kube-api-access-cvg9q\") pod \"f5daf273-2fdc-4706-8f43-022d2f4a2770\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") "
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.455907 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-catalog-content\") pod \"f5daf273-2fdc-4706-8f43-022d2f4a2770\" (UID: \"f5daf273-2fdc-4706-8f43-022d2f4a2770\") "
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.456555 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-utilities" (OuterVolumeSpecName: "utilities") pod "f5daf273-2fdc-4706-8f43-022d2f4a2770" (UID: "f5daf273-2fdc-4706-8f43-022d2f4a2770"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.474838 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5daf273-2fdc-4706-8f43-022d2f4a2770-kube-api-access-cvg9q" (OuterVolumeSpecName: "kube-api-access-cvg9q") pod "f5daf273-2fdc-4706-8f43-022d2f4a2770" (UID: "f5daf273-2fdc-4706-8f43-022d2f4a2770"). InnerVolumeSpecName "kube-api-access-cvg9q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.560123 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.560178 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvg9q\" (UniqueName: \"kubernetes.io/projected/f5daf273-2fdc-4706-8f43-022d2f4a2770-kube-api-access-cvg9q\") on node \"crc\" DevicePath \"\""
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.618151 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5daf273-2fdc-4706-8f43-022d2f4a2770" (UID: "f5daf273-2fdc-4706-8f43-022d2f4a2770"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.663454 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5daf273-2fdc-4706-8f43-022d2f4a2770-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.799978 4848 generic.go:334] "Generic (PLEG): container finished" podID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerID="ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536" exitCode=0
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.800037 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerDied","Data":"ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536"}
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.800080 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jll9s" event={"ID":"f5daf273-2fdc-4706-8f43-022d2f4a2770","Type":"ContainerDied","Data":"f9b7697f572ed2312808a8cf42b26db5b04626f4f0bb21bec3c690e6f26655a5"}
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.800103 4848 scope.go:117] "RemoveContainer" containerID="ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.800105 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jll9s"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.850696 4848 scope.go:117] "RemoveContainer" containerID="27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.871633 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jll9s"]
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.885886 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jll9s"]
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.894398 4848 scope.go:117] "RemoveContainer" containerID="7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.938313 4848 scope.go:117] "RemoveContainer" containerID="ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536"
Jan 28 13:43:01 crc kubenswrapper[4848]: E0128 13:43:01.939160 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536\": container with ID starting with ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536 not found: ID does not exist" containerID="ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.939232 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536"} err="failed to get container status \"ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536\": rpc error: code = NotFound desc = could not find container \"ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536\": container with ID starting with ac86aeece3feda7578aa4a3c3acf5bf140e2304b3b1456af93dd88e4d6416536 not found: ID does not exist"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.939293 4848 scope.go:117] "RemoveContainer" containerID="27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3"
Jan 28 13:43:01 crc kubenswrapper[4848]: E0128 13:43:01.939714 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3\": container with ID starting with 27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3 not found: ID does not exist" containerID="27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.939777 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3"} err="failed to get container status \"27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3\": rpc error: code = NotFound desc = could not find container \"27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3\": container with ID starting with 27be1964e08194fea4ff7a2b3e9a6ca7ac03c1f7153bb244b069e97fa894d8b3 not found: ID does not exist"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.939819 4848 scope.go:117] "RemoveContainer" containerID="7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106"
Jan 28 13:43:01 crc kubenswrapper[4848]: E0128 13:43:01.940371 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106\": container with ID starting with 7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106 not found: ID does not exist" containerID="7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106"
Jan 28 13:43:01 crc kubenswrapper[4848]: I0128 13:43:01.940405 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106"} err="failed to get container status \"7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106\": rpc error: code = NotFound desc = could not find container \"7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106\": container with ID starting with 7cacb9c39b6aeeb10ba3afc4d36cbf9c26a96f25cd95434d0831dc5454122106 not found: ID does not exist"
Jan 28 13:43:02 crc kubenswrapper[4848]: I0128 13:43:02.865947 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" path="/var/lib/kubelet/pods/f5daf273-2fdc-4706-8f43-022d2f4a2770/volumes"
Jan 28 13:43:10 crc kubenswrapper[4848]: I0128 13:43:10.851022 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:43:10 crc kubenswrapper[4848]: E0128 13:43:10.852878 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:43:22 crc kubenswrapper[4848]: I0128 13:43:22.851064 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:43:22 crc kubenswrapper[4848]: E0128 13:43:22.852198 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:43:35 crc kubenswrapper[4848]: I0128 13:43:35.850852 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:43:35 crc kubenswrapper[4848]: E0128 13:43:35.853684 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 13:43:48 crc kubenswrapper[4848]: I0128 13:43:48.850803 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696"
Jan 28 13:43:49 crc kubenswrapper[4848]: I0128 13:43:49.366813 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"b49f692be73af62ca0362888898919d862b1f9f6a2be9652f34ed60d6ad42d0d"}
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.112227 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xmj5z"]
Jan 28 13:44:49 crc kubenswrapper[4848]: E0128 13:44:49.113276 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="extract-content"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.113291 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="extract-content"
Jan 28 13:44:49 crc kubenswrapper[4848]: E0128 13:44:49.113315 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="registry-server"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.113321 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="registry-server"
Jan 28 13:44:49 crc kubenswrapper[4848]: E0128 13:44:49.113340 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="extract-utilities"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.113345 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="extract-utilities"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.113559 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5daf273-2fdc-4706-8f43-022d2f4a2770" containerName="registry-server"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.115352 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.126144 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xmj5z"]
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.195037 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cljfv\" (UniqueName: \"kubernetes.io/projected/0faec2cf-cb9a-4c84-b020-2782d1927242-kube-api-access-cljfv\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.195122 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0faec2cf-cb9a-4c84-b020-2782d1927242-utilities\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.195402 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0faec2cf-cb9a-4c84-b020-2782d1927242-catalog-content\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.297437 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0faec2cf-cb9a-4c84-b020-2782d1927242-catalog-content\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.297571 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cljfv\" (UniqueName: \"kubernetes.io/projected/0faec2cf-cb9a-4c84-b020-2782d1927242-kube-api-access-cljfv\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.297614 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0faec2cf-cb9a-4c84-b020-2782d1927242-utilities\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.298182 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0faec2cf-cb9a-4c84-b020-2782d1927242-catalog-content\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.298344 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0faec2cf-cb9a-4c84-b020-2782d1927242-utilities\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.321583 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cljfv\" (UniqueName: \"kubernetes.io/projected/0faec2cf-cb9a-4c84-b020-2782d1927242-kube-api-access-cljfv\") pod \"certified-operators-xmj5z\" (UID: \"0faec2cf-cb9a-4c84-b020-2782d1927242\") " pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:49 crc kubenswrapper[4848]: I0128 13:44:49.440391 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.469295 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xmj5z"]
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.514543 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pzt4d"]
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.517170 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.526564 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzt4d"]
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.558753 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfz8w\" (UniqueName: \"kubernetes.io/projected/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-kube-api-access-xfz8w\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.560418 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-utilities\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.563024 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-catalog-content\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.666300 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-catalog-content\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.666415 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfz8w\" (UniqueName: \"kubernetes.io/projected/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-kube-api-access-xfz8w\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.666505 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-utilities\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.667835 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-utilities\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.667792 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-catalog-content\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.707861 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfz8w\" (UniqueName: \"kubernetes.io/projected/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-kube-api-access-xfz8w\") pod \"redhat-marketplace-pzt4d\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:51 crc kubenswrapper[4848]: I0128 13:44:51.972540 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzt4d"
Jan 28 13:44:52 crc kubenswrapper[4848]: I0128 13:44:52.188691 4848 generic.go:334] "Generic (PLEG): container finished" podID="0faec2cf-cb9a-4c84-b020-2782d1927242" containerID="0cdaef7caac0eebaa65471bbd71c34c946ade235e80c5368dd5aa6307c79aa2f" exitCode=0
Jan 28 13:44:52 crc kubenswrapper[4848]: I0128 13:44:52.189034 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmj5z" event={"ID":"0faec2cf-cb9a-4c84-b020-2782d1927242","Type":"ContainerDied","Data":"0cdaef7caac0eebaa65471bbd71c34c946ade235e80c5368dd5aa6307c79aa2f"}
Jan 28 13:44:52 crc kubenswrapper[4848]: I0128 13:44:52.189067 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmj5z" event={"ID":"0faec2cf-cb9a-4c84-b020-2782d1927242","Type":"ContainerStarted","Data":"8e45e993b9e742cd26c1498d3c6b160445cf45dd750ffc02e69579bcdac4afd1"}
Jan 28 13:44:52 crc kubenswrapper[4848]: I0128 13:44:52.575392 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzt4d"]
Jan 28 13:44:53 crc kubenswrapper[4848]: I0128 13:44:53.203555 4848 generic.go:334] "Generic (PLEG): container finished" podID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerID="71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7" exitCode=0
Jan 28 13:44:53 crc kubenswrapper[4848]: I0128 13:44:53.203627 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerDied","Data":"71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7"}
Jan 28 13:44:53 crc kubenswrapper[4848]: I0128 13:44:53.203668 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerStarted","Data":"b2ef73d0d195716733a634eb9798d56f100c7a6d271a9160aafad0c93c962cfc"}
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.160661 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"]
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.163102 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.167141 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.168511 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.176396 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"]
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.246534 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/068b97ab-42f7-4b69-bca2-1cd58c1298ae-config-volume\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.246994 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/068b97ab-42f7-4b69-bca2-1cd58c1298ae-secret-volume\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.247185 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgg4h\" (UniqueName: \"kubernetes.io/projected/068b97ab-42f7-4b69-bca2-1cd58c1298ae-kube-api-access-wgg4h\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.296241 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerStarted","Data":"621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0"}
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.350544 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/068b97ab-42f7-4b69-bca2-1cd58c1298ae-config-volume\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.350700 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/068b97ab-42f7-4b69-bca2-1cd58c1298ae-secret-volume\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.350769 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgg4h\" (UniqueName: \"kubernetes.io/projected/068b97ab-42f7-4b69-bca2-1cd58c1298ae-kube-api-access-wgg4h\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.351977 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/068b97ab-42f7-4b69-bca2-1cd58c1298ae-config-volume\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.362514 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/068b97ab-42f7-4b69-bca2-1cd58c1298ae-secret-volume\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.374954 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgg4h\" (UniqueName: \"kubernetes.io/projected/068b97ab-42f7-4b69-bca2-1cd58c1298ae-kube-api-access-wgg4h\") pod \"collect-profiles-29493465-57wrh\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:00 crc kubenswrapper[4848]: I0128 13:45:00.486741 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:01 crc kubenswrapper[4848]: W0128 13:45:01.102129 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod068b97ab_42f7_4b69_bca2_1cd58c1298ae.slice/crio-b24ab9e6b6ef63f83bc7fb28de7413288709c6cbdef43e748c49184e3e1f6944 WatchSource:0}: Error finding container b24ab9e6b6ef63f83bc7fb28de7413288709c6cbdef43e748c49184e3e1f6944: Status 404 returned error can't find the container with id b24ab9e6b6ef63f83bc7fb28de7413288709c6cbdef43e748c49184e3e1f6944
Jan 28 13:45:01 crc kubenswrapper[4848]: I0128 13:45:01.102414 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"]
Jan 28 13:45:01 crc kubenswrapper[4848]: I0128 13:45:01.310422 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmj5z" event={"ID":"0faec2cf-cb9a-4c84-b020-2782d1927242","Type":"ContainerStarted","Data":"ae9b9b611681b5e42b6ee38893ddc611f0042237af0caf604cc494e5d2d69e4c"}
Jan 28 13:45:01 crc kubenswrapper[4848]: I0128 13:45:01.312197 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh" event={"ID":"068b97ab-42f7-4b69-bca2-1cd58c1298ae","Type":"ContainerStarted","Data":"b24ab9e6b6ef63f83bc7fb28de7413288709c6cbdef43e748c49184e3e1f6944"}
Jan 28 13:45:02 crc kubenswrapper[4848]: I0128 13:45:02.325712 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh" event={"ID":"068b97ab-42f7-4b69-bca2-1cd58c1298ae","Type":"ContainerStarted","Data":"3085267885a4c81fa866e651847a7a285cee48bc4ffad715fc0cbd136e91f8dd"}
Jan 28 13:45:02 crc kubenswrapper[4848]: I0128 13:45:02.328502 4848 generic.go:334] "Generic (PLEG): container finished" podID="0faec2cf-cb9a-4c84-b020-2782d1927242" containerID="ae9b9b611681b5e42b6ee38893ddc611f0042237af0caf604cc494e5d2d69e4c" exitCode=0
Jan 28 13:45:02 crc kubenswrapper[4848]: I0128 13:45:02.328538 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmj5z" event={"ID":"0faec2cf-cb9a-4c84-b020-2782d1927242","Type":"ContainerDied","Data":"ae9b9b611681b5e42b6ee38893ddc611f0042237af0caf604cc494e5d2d69e4c"}
Jan 28 13:45:03 crc kubenswrapper[4848]: I0128 13:45:03.368651 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh" podStartSLOduration=3.368620901 podStartE2EDuration="3.368620901s" podCreationTimestamp="2026-01-28 13:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 13:45:03.360663503 +0000 UTC m=+3530.272880541" watchObservedRunningTime="2026-01-28 13:45:03.368620901 +0000 UTC m=+3530.280837939"
Jan 28 13:45:04 crc kubenswrapper[4848]: I0128 13:45:04.351328 4848 generic.go:334] "Generic (PLEG): container finished" podID="068b97ab-42f7-4b69-bca2-1cd58c1298ae" containerID="3085267885a4c81fa866e651847a7a285cee48bc4ffad715fc0cbd136e91f8dd" exitCode=0
Jan 28 13:45:04 crc kubenswrapper[4848]: I0128 13:45:04.351392 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh" event={"ID":"068b97ab-42f7-4b69-bca2-1cd58c1298ae","Type":"ContainerDied","Data":"3085267885a4c81fa866e651847a7a285cee48bc4ffad715fc0cbd136e91f8dd"}
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.850349 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.944022 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/068b97ab-42f7-4b69-bca2-1cd58c1298ae-secret-volume\") pod \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") "
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.944191 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/068b97ab-42f7-4b69-bca2-1cd58c1298ae-config-volume\") pod \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") "
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.944272 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgg4h\" (UniqueName: \"kubernetes.io/projected/068b97ab-42f7-4b69-bca2-1cd58c1298ae-kube-api-access-wgg4h\") pod \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\" (UID: \"068b97ab-42f7-4b69-bca2-1cd58c1298ae\") "
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.944861 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/068b97ab-42f7-4b69-bca2-1cd58c1298ae-config-volume" (OuterVolumeSpecName: "config-volume") pod "068b97ab-42f7-4b69-bca2-1cd58c1298ae" (UID: "068b97ab-42f7-4b69-bca2-1cd58c1298ae"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.945652 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/068b97ab-42f7-4b69-bca2-1cd58c1298ae-config-volume\") on node \"crc\" DevicePath \"\""
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.955633 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/068b97ab-42f7-4b69-bca2-1cd58c1298ae-kube-api-access-wgg4h" (OuterVolumeSpecName: "kube-api-access-wgg4h") pod "068b97ab-42f7-4b69-bca2-1cd58c1298ae" (UID: "068b97ab-42f7-4b69-bca2-1cd58c1298ae"). InnerVolumeSpecName "kube-api-access-wgg4h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 13:45:05 crc kubenswrapper[4848]: I0128 13:45:05.955772 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/068b97ab-42f7-4b69-bca2-1cd58c1298ae-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "068b97ab-42f7-4b69-bca2-1cd58c1298ae" (UID: "068b97ab-42f7-4b69-bca2-1cd58c1298ae"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.048718 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgg4h\" (UniqueName: \"kubernetes.io/projected/068b97ab-42f7-4b69-bca2-1cd58c1298ae-kube-api-access-wgg4h\") on node \"crc\" DevicePath \"\""
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.048757 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/068b97ab-42f7-4b69-bca2-1cd58c1298ae-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.379821 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh" event={"ID":"068b97ab-42f7-4b69-bca2-1cd58c1298ae","Type":"ContainerDied","Data":"b24ab9e6b6ef63f83bc7fb28de7413288709c6cbdef43e748c49184e3e1f6944"}
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.380127 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b24ab9e6b6ef63f83bc7fb28de7413288709c6cbdef43e748c49184e3e1f6944"
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.379938 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.454074 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"]
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.465486 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-ss2wb"]
Jan 28 13:45:06 crc kubenswrapper[4848]: I0128 13:45:06.997518 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1917adb5-e9d0-44e9-9176-afb51d9c0f30" path="/var/lib/kubelet/pods/1917adb5-e9d0-44e9-9176-afb51d9c0f30/volumes"
Jan 28 13:45:08 crc kubenswrapper[4848]: I0128 13:45:08.406233 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmj5z" event={"ID":"0faec2cf-cb9a-4c84-b020-2782d1927242","Type":"ContainerStarted","Data":"8c8588b4bb9498fbfc3f13ebb30bcf7ce8f2dc53e493116500a1bb91da3e9abe"}
Jan 28 13:45:09 crc kubenswrapper[4848]: I0128 13:45:09.445621 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:45:09 crc kubenswrapper[4848]: I0128 13:45:09.446126 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xmj5z"
Jan 28 13:45:09 crc kubenswrapper[4848]: I0128 13:45:09.455667 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xmj5z" podStartSLOduration=5.303350559 podStartE2EDuration="20.455642173s" podCreationTimestamp="2026-01-28 13:44:49 +0000 UTC" firstStartedPulling="2026-01-28 13:44:52.193515495 +0000 UTC m=+3519.105732543" lastFinishedPulling="2026-01-28 13:45:07.345807119 +0000 UTC m=+3534.258024157" observedRunningTime="2026-01-28 13:45:09.446111081 +0000 UTC m=+3536.358328119" watchObservedRunningTime="2026-01-28 13:45:09.455642173 +0000 UTC m=+3536.367859211"
Jan 28 13:45:10 crc kubenswrapper[4848]: I0128 13:45:10.435209 4848 generic.go:334] "Generic (PLEG): container finished" podID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerID="621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0" exitCode=0
Jan 28 13:45:10 crc kubenswrapper[4848]: I0128 13:45:10.435431 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerDied","Data":"621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0"}
Jan 28 13:45:10 crc kubenswrapper[4848]: I0128 13:45:10.512491 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-xmj5z" podUID="0faec2cf-cb9a-4c84-b020-2782d1927242" containerName="registry-server" probeResult="failure" output=<
Jan 28 13:45:10 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s
Jan 28 13:45:10 crc kubenswrapper[4848]: >
Jan 28 13:45:15 crc kubenswrapper[4848]: I0128 13:45:15.503342 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerStarted","Data":"2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d"}
Jan 28 13:45:15 crc kubenswrapper[4848]: I0128 13:45:15.530065 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration"
pod="openshift-marketplace/redhat-marketplace-pzt4d" podStartSLOduration=3.300479769 podStartE2EDuration="24.530035979s" podCreationTimestamp="2026-01-28 13:44:51 +0000 UTC" firstStartedPulling="2026-01-28 13:44:53.205954521 +0000 UTC m=+3520.118171559" lastFinishedPulling="2026-01-28 13:45:14.435510721 +0000 UTC m=+3541.347727769" observedRunningTime="2026-01-28 13:45:15.523052697 +0000 UTC m=+3542.435269745" watchObservedRunningTime="2026-01-28 13:45:15.530035979 +0000 UTC m=+3542.442253017" Jan 28 13:45:19 crc kubenswrapper[4848]: I0128 13:45:19.516278 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xmj5z" Jan 28 13:45:19 crc kubenswrapper[4848]: I0128 13:45:19.577769 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xmj5z" Jan 28 13:45:20 crc kubenswrapper[4848]: I0128 13:45:20.123969 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xmj5z"] Jan 28 13:45:20 crc kubenswrapper[4848]: I0128 13:45:20.303932 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sf2v5"] Jan 28 13:45:20 crc kubenswrapper[4848]: I0128 13:45:20.304400 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sf2v5" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="registry-server" containerID="cri-o://2969ebabbe6a0eb6eea29fa7826924f19ab5dea4802785fa48a8022167f9ec0f" gracePeriod=2 Jan 28 13:45:20 crc kubenswrapper[4848]: I0128 13:45:20.576010 4848 generic.go:334] "Generic (PLEG): container finished" podID="02b30305-56c4-45c3-aae4-de194e8caa56" containerID="2969ebabbe6a0eb6eea29fa7826924f19ab5dea4802785fa48a8022167f9ec0f" exitCode=0 Jan 28 13:45:20 crc kubenswrapper[4848]: I0128 13:45:20.577187 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf2v5" event={"ID":"02b30305-56c4-45c3-aae4-de194e8caa56","Type":"ContainerDied","Data":"2969ebabbe6a0eb6eea29fa7826924f19ab5dea4802785fa48a8022167f9ec0f"} Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.015624 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.069009 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm9lj\" (UniqueName: \"kubernetes.io/projected/02b30305-56c4-45c3-aae4-de194e8caa56-kube-api-access-tm9lj\") pod \"02b30305-56c4-45c3-aae4-de194e8caa56\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.069078 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-utilities\") pod \"02b30305-56c4-45c3-aae4-de194e8caa56\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.069123 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-catalog-content\") pod \"02b30305-56c4-45c3-aae4-de194e8caa56\" (UID: \"02b30305-56c4-45c3-aae4-de194e8caa56\") " Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.086405 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-utilities" (OuterVolumeSpecName: "utilities") pod "02b30305-56c4-45c3-aae4-de194e8caa56" (UID: "02b30305-56c4-45c3-aae4-de194e8caa56"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.096601 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02b30305-56c4-45c3-aae4-de194e8caa56-kube-api-access-tm9lj" (OuterVolumeSpecName: "kube-api-access-tm9lj") pod "02b30305-56c4-45c3-aae4-de194e8caa56" (UID: "02b30305-56c4-45c3-aae4-de194e8caa56"). InnerVolumeSpecName "kube-api-access-tm9lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.172620 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm9lj\" (UniqueName: \"kubernetes.io/projected/02b30305-56c4-45c3-aae4-de194e8caa56-kube-api-access-tm9lj\") on node \"crc\" DevicePath \"\"" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.172981 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.198800 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02b30305-56c4-45c3-aae4-de194e8caa56" (UID: "02b30305-56c4-45c3-aae4-de194e8caa56"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.275810 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02b30305-56c4-45c3-aae4-de194e8caa56-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.591870 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf2v5" event={"ID":"02b30305-56c4-45c3-aae4-de194e8caa56","Type":"ContainerDied","Data":"aac2eb693b964ff2593038ce498308d76096e593c15e01546e30ac515b444986"} Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.591959 4848 scope.go:117] "RemoveContainer" containerID="2969ebabbe6a0eb6eea29fa7826924f19ab5dea4802785fa48a8022167f9ec0f" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.591963 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sf2v5" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.628923 4848 scope.go:117] "RemoveContainer" containerID="1f16c72c559ceaf1cfa6a2c449a7e471c2d82f871612313f830a7a1a787becdf" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.638058 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sf2v5"] Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.652302 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sf2v5"] Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.664280 4848 scope.go:117] "RemoveContainer" containerID="03aca973d0f0eb1cd3e583b932d42bb95ba5a0ac11106d0a613ec37bd533d35e" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.973591 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pzt4d" Jan 28 13:45:21 crc kubenswrapper[4848]: I0128 13:45:21.973924 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pzt4d" Jan 28 13:45:22 crc kubenswrapper[4848]: I0128 13:45:22.061315 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pzt4d" Jan 28 13:45:22 crc kubenswrapper[4848]: I0128 13:45:22.674199 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pzt4d" Jan 28 13:45:22 crc kubenswrapper[4848]: I0128 13:45:22.865086 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" path="/var/lib/kubelet/pods/02b30305-56c4-45c3-aae4-de194e8caa56/volumes" Jan 28 13:45:23 crc kubenswrapper[4848]: I0128 13:45:23.702318 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzt4d"] Jan 28 13:45:25 crc kubenswrapper[4848]: I0128 13:45:25.637361 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pzt4d" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="registry-server" containerID="cri-o://2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d" gracePeriod=2 Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.174207 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzt4d" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.310514 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-catalog-content\") pod \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.310736 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfz8w\" (UniqueName: \"kubernetes.io/projected/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-kube-api-access-xfz8w\") pod \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.310818 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-utilities\") pod \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\" (UID: \"2ec282cd-9386-4c4f-94c8-2e4e7c77d699\") " Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.312356 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-utilities" (OuterVolumeSpecName: "utilities") pod "2ec282cd-9386-4c4f-94c8-2e4e7c77d699" (UID: "2ec282cd-9386-4c4f-94c8-2e4e7c77d699"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.320285 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-kube-api-access-xfz8w" (OuterVolumeSpecName: "kube-api-access-xfz8w") pod "2ec282cd-9386-4c4f-94c8-2e4e7c77d699" (UID: "2ec282cd-9386-4c4f-94c8-2e4e7c77d699"). InnerVolumeSpecName "kube-api-access-xfz8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.336744 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2ec282cd-9386-4c4f-94c8-2e4e7c77d699" (UID: "2ec282cd-9386-4c4f-94c8-2e4e7c77d699"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.414359 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.414406 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.414423 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfz8w\" (UniqueName: \"kubernetes.io/projected/2ec282cd-9386-4c4f-94c8-2e4e7c77d699-kube-api-access-xfz8w\") on node \"crc\" DevicePath \"\"" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.652026 4848 generic.go:334] "Generic (PLEG): container finished" podID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerID="2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d" exitCode=0 Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.652104 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerDied","Data":"2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d"} Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.652186 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzt4d" event={"ID":"2ec282cd-9386-4c4f-94c8-2e4e7c77d699","Type":"ContainerDied","Data":"b2ef73d0d195716733a634eb9798d56f100c7a6d271a9160aafad0c93c962cfc"} Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.652226 4848 scope.go:117] "RemoveContainer" containerID="2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.652124 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzt4d" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.685384 4848 scope.go:117] "RemoveContainer" containerID="621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.706650 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzt4d"] Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.716497 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzt4d"] Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.722967 4848 scope.go:117] "RemoveContainer" containerID="71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.785816 4848 scope.go:117] "RemoveContainer" containerID="2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d" Jan 28 13:45:26 crc kubenswrapper[4848]: E0128 13:45:26.787241 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d\": container with ID starting with 2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d not found: ID does not exist" containerID="2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.787330 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d"} err="failed to get container status \"2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d\": rpc error: code = NotFound desc = could not find container \"2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d\": container with ID starting with 2a5e1563a2a97bc3cebc309de8be57509e7a4434e639756afbac57e10ec3475d not found: ID does not exist" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.787372 4848 scope.go:117] "RemoveContainer" containerID="621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0" Jan 28 13:45:26 crc kubenswrapper[4848]: E0128 13:45:26.787993 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0\": container with ID starting with 621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0 not found: ID does not exist" containerID="621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.788040 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0"} err="failed to get container status \"621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0\": rpc error: code = NotFound desc = could not find container \"621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0\": container with ID starting with 621464fa34fdb36dd2b8259891fe195506ab067c50849891d1c50b4bb3ca93e0 not found: ID does not exist" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.788067 4848 scope.go:117] "RemoveContainer" containerID="71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7" Jan 28 13:45:26 crc kubenswrapper[4848]: E0128 13:45:26.788659 4848 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7\": container with ID starting with 71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7 not found: ID does not exist" containerID="71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.788693 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7"} err="failed to get container status \"71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7\": rpc error: code = NotFound desc = could not find container \"71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7\": container with ID starting with 71ae22e689489229fb6ba7cc702634842d2e598d41e5f74262c2b35162dc15e7 not found: ID does not exist" Jan 28 13:45:26 crc kubenswrapper[4848]: I0128 13:45:26.861533 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" path="/var/lib/kubelet/pods/2ec282cd-9386-4c4f-94c8-2e4e7c77d699/volumes" Jan 28 13:46:06 crc kubenswrapper[4848]: I0128 13:46:06.297227 4848 scope.go:117] "RemoveContainer" containerID="3b0362aaee98900bfca0f46b8768113073100c8e5f1880eb1e4029b3aeb38104" Jan 28 13:46:07 crc kubenswrapper[4848]: I0128 13:46:07.924467 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:46:07 crc kubenswrapper[4848]: I0128 13:46:07.925510 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:46:37 crc kubenswrapper[4848]: I0128 13:46:37.924763 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:46:37 crc kubenswrapper[4848]: I0128 13:46:37.925302 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:47:07 crc kubenswrapper[4848]: I0128 13:47:07.924317 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:47:07 crc kubenswrapper[4848]: I0128 13:47:07.925003 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:47:07 crc kubenswrapper[4848]: I0128 13:47:07.925073 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:47:07 crc kubenswrapper[4848]: I0128 13:47:07.926337 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b49f692be73af62ca0362888898919d862b1f9f6a2be9652f34ed60d6ad42d0d"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:47:07 crc kubenswrapper[4848]: I0128 13:47:07.926405 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://b49f692be73af62ca0362888898919d862b1f9f6a2be9652f34ed60d6ad42d0d" gracePeriod=600 Jan 28 13:47:08 crc kubenswrapper[4848]: I0128 13:47:08.773235 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="b49f692be73af62ca0362888898919d862b1f9f6a2be9652f34ed60d6ad42d0d" exitCode=0 Jan 28 13:47:08 crc kubenswrapper[4848]: I0128 13:47:08.773291 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"b49f692be73af62ca0362888898919d862b1f9f6a2be9652f34ed60d6ad42d0d"} Jan 28 13:47:08 crc kubenswrapper[4848]: I0128 13:47:08.774219 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8"} Jan 28 13:47:08 crc kubenswrapper[4848]: I0128 13:47:08.774298 4848 scope.go:117] "RemoveContainer" containerID="93a2ea33ed9a533b6502a0fd704a28e2c662ee94d777e5d4ebaa67ecbba58696" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.864168 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kjb8r"] Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865545 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="extract-content" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865565 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="extract-content" Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865579 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="068b97ab-42f7-4b69-bca2-1cd58c1298ae" containerName="collect-profiles" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865592 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="068b97ab-42f7-4b69-bca2-1cd58c1298ae" containerName="collect-profiles" Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865633 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="registry-server" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865642 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="registry-server" Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865664 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="extract-content" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865676 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="extract-content" Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865691 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="extract-utilities" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865701 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="extract-utilities" Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865730 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="extract-utilities" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865739 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="extract-utilities" Jan 28 13:48:46 crc kubenswrapper[4848]: E0128 13:48:46.865757 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="registry-server" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.865766 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="registry-server" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.866019 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ec282cd-9386-4c4f-94c8-2e4e7c77d699" containerName="registry-server" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.866055 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="068b97ab-42f7-4b69-bca2-1cd58c1298ae" containerName="collect-profiles" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.866072 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="02b30305-56c4-45c3-aae4-de194e8caa56" containerName="registry-server" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.868344 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:46 crc kubenswrapper[4848]: I0128 13:48:46.876892 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kjb8r"] Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.032996 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-catalog-content\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.033351 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z9f6\" (UniqueName: \"kubernetes.io/projected/4f13ae37-5673-4f84-ad92-c96f37f846a2-kube-api-access-2z9f6\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.033743 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-utilities\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.136371 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z9f6\" (UniqueName: \"kubernetes.io/projected/4f13ae37-5673-4f84-ad92-c96f37f846a2-kube-api-access-2z9f6\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.136490 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-utilities\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.136604 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-catalog-content\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.137177 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-utilities\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.137200 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-catalog-content\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.164361 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2z9f6\" (UniqueName: \"kubernetes.io/projected/4f13ae37-5673-4f84-ad92-c96f37f846a2-kube-api-access-2z9f6\") pod \"community-operators-kjb8r\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.228596 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.835938 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kjb8r"] Jan 28 13:48:47 crc kubenswrapper[4848]: I0128 13:48:47.904800 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerStarted","Data":"48fd5aef22ae24089ded03714c738b9d5fa0ecb509c7d4530c2aacbd3276f92b"} Jan 28 13:48:48 crc kubenswrapper[4848]: I0128 13:48:48.919329 4848 generic.go:334] "Generic (PLEG): container finished" podID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerID="6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b" exitCode=0 Jan 28 13:48:48 crc kubenswrapper[4848]: I0128 13:48:48.919431 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerDied","Data":"6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b"} Jan 28 13:48:48 crc kubenswrapper[4848]: I0128 13:48:48.922950 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:48:49 crc kubenswrapper[4848]: I0128 13:48:49.934733 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerStarted","Data":"70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574"} Jan 28 13:48:51 crc kubenswrapper[4848]: I0128 13:48:51.957667 4848 generic.go:334] "Generic (PLEG): container finished" podID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerID="70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574" exitCode=0 Jan 28 13:48:51 crc kubenswrapper[4848]: I0128 13:48:51.957746 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerDied","Data":"70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574"} Jan 28 13:48:52 crc kubenswrapper[4848]: I0128 13:48:52.971591 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerStarted","Data":"e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910"} Jan 28 13:48:52 crc kubenswrapper[4848]: I0128 13:48:52.999288 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kjb8r" podStartSLOduration=3.288601786 podStartE2EDuration="6.99924184s" podCreationTimestamp="2026-01-28 13:48:46 +0000 UTC" firstStartedPulling="2026-01-28 13:48:48.922550803 +0000 UTC m=+3755.834767851" lastFinishedPulling="2026-01-28 13:48:52.633190867 +0000 UTC m=+3759.545407905" observedRunningTime="2026-01-28 13:48:52.995037806 +0000 UTC m=+3759.907254844" watchObservedRunningTime="2026-01-28 
13:48:52.99924184 +0000 UTC m=+3759.911458878" Jan 28 13:48:57 crc kubenswrapper[4848]: I0128 13:48:57.229846 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:57 crc kubenswrapper[4848]: I0128 13:48:57.231728 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:57 crc kubenswrapper[4848]: I0128 13:48:57.286166 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:58 crc kubenswrapper[4848]: I0128 13:48:58.133475 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:48:58 crc kubenswrapper[4848]: I0128 13:48:58.198733 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kjb8r"] Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.090232 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kjb8r" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="registry-server" containerID="cri-o://e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910" gracePeriod=2 Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.645926 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.746522 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-utilities\") pod \"4f13ae37-5673-4f84-ad92-c96f37f846a2\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.746670 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-catalog-content\") pod \"4f13ae37-5673-4f84-ad92-c96f37f846a2\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.746932 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2z9f6\" (UniqueName: \"kubernetes.io/projected/4f13ae37-5673-4f84-ad92-c96f37f846a2-kube-api-access-2z9f6\") pod \"4f13ae37-5673-4f84-ad92-c96f37f846a2\" (UID: \"4f13ae37-5673-4f84-ad92-c96f37f846a2\") " Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.750593 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-utilities" (OuterVolumeSpecName: "utilities") pod "4f13ae37-5673-4f84-ad92-c96f37f846a2" (UID: "4f13ae37-5673-4f84-ad92-c96f37f846a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.760618 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f13ae37-5673-4f84-ad92-c96f37f846a2-kube-api-access-2z9f6" (OuterVolumeSpecName: "kube-api-access-2z9f6") pod "4f13ae37-5673-4f84-ad92-c96f37f846a2" (UID: "4f13ae37-5673-4f84-ad92-c96f37f846a2"). InnerVolumeSpecName "kube-api-access-2z9f6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.801235 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f13ae37-5673-4f84-ad92-c96f37f846a2" (UID: "4f13ae37-5673-4f84-ad92-c96f37f846a2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.856674 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.857057 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f13ae37-5673-4f84-ad92-c96f37f846a2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:49:00 crc kubenswrapper[4848]: I0128 13:49:00.857142 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2z9f6\" (UniqueName: \"kubernetes.io/projected/4f13ae37-5673-4f84-ad92-c96f37f846a2-kube-api-access-2z9f6\") on node \"crc\" DevicePath \"\"" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.106582 4848 generic.go:334] "Generic (PLEG): container finished" podID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerID="e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910" exitCode=0 Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.106668 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerDied","Data":"e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910"} Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.106715 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjb8r" event={"ID":"4f13ae37-5673-4f84-ad92-c96f37f846a2","Type":"ContainerDied","Data":"48fd5aef22ae24089ded03714c738b9d5fa0ecb509c7d4530c2aacbd3276f92b"} Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.106743 4848 scope.go:117] "RemoveContainer" containerID="e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.106968 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kjb8r" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.142050 4848 scope.go:117] "RemoveContainer" containerID="70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.144137 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kjb8r"] Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.155943 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kjb8r"] Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.171772 4848 scope.go:117] "RemoveContainer" containerID="6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.235212 4848 scope.go:117] "RemoveContainer" containerID="e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910" Jan 28 13:49:01 crc kubenswrapper[4848]: E0128 13:49:01.242797 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910\": container with ID starting with e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910 not found: ID does not exist" containerID="e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.242901 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910"} err="failed to get container status \"e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910\": rpc error: code = NotFound desc = could not find container \"e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910\": container with ID starting with e94b0cae6b6c95ce36af96795b04462b1bb20184cf539f9559b2798ff4f70910 not found: ID does not exist" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.242949 4848 scope.go:117] "RemoveContainer" containerID="70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574" Jan 28 13:49:01 crc kubenswrapper[4848]: E0128 13:49:01.253560 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574\": container with ID starting with 70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574 not found: ID does not exist" containerID="70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.253646 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574"} err="failed to get container status \"70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574\": rpc error: code = NotFound desc = could not find container \"70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574\": container with ID starting with 70b18c80236005ae96126191cbcee5377992296fd04afe2b3923dd0bdbf29574 not found: ID does not exist" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.253679 4848 scope.go:117] "RemoveContainer" containerID="6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b" Jan 28 13:49:01 crc kubenswrapper[4848]: E0128 13:49:01.254412 4848 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b\": container with ID starting with 6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b not found: ID does not exist" containerID="6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b" Jan 28 13:49:01 crc kubenswrapper[4848]: I0128 13:49:01.254574 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b"} err="failed to get container status \"6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b\": rpc error: code = NotFound desc = could not find container \"6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b\": container with ID starting with 6189f1e43003a519fb216bc4b50b3b89f95f1226b4ed9463ed0cf5da76a7c44b not found: ID does not exist" Jan 28 13:49:02 crc kubenswrapper[4848]: I0128 13:49:02.865793 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" path="/var/lib/kubelet/pods/4f13ae37-5673-4f84-ad92-c96f37f846a2/volumes" Jan 28 13:49:37 crc kubenswrapper[4848]: I0128 13:49:37.924838 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:49:37 crc kubenswrapper[4848]: I0128 13:49:37.925683 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:50:07 crc kubenswrapper[4848]: I0128 13:50:07.924477 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:50:07 crc kubenswrapper[4848]: I0128 13:50:07.925544 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:50:37 crc kubenswrapper[4848]: I0128 13:50:37.925196 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:50:37 crc kubenswrapper[4848]: I0128 13:50:37.925989 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:50:37 crc kubenswrapper[4848]: I0128 13:50:37.926058 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:50:37 crc kubenswrapper[4848]: I0128 13:50:37.927216 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:50:37 crc kubenswrapper[4848]: I0128 13:50:37.927303 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" gracePeriod=600 Jan 28 13:50:38 crc kubenswrapper[4848]: E0128 13:50:38.175161 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:50:38 crc kubenswrapper[4848]: I0128 13:50:38.301364 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" exitCode=0 Jan 28 13:50:38 crc kubenswrapper[4848]: I0128 13:50:38.301426 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8"} Jan 28 13:50:38 crc kubenswrapper[4848]: I0128 13:50:38.301476 4848 scope.go:117] "RemoveContainer" containerID="b49f692be73af62ca0362888898919d862b1f9f6a2be9652f34ed60d6ad42d0d" Jan 28 13:50:38 crc kubenswrapper[4848]: I0128 13:50:38.303284 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:50:38 crc kubenswrapper[4848]: E0128 13:50:38.304159 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:50:51 crc kubenswrapper[4848]: I0128 13:50:51.851145 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:50:51 crc kubenswrapper[4848]: E0128 13:50:51.852384 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:51:04 crc 
kubenswrapper[4848]: I0128 13:51:04.885501 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:51:04 crc kubenswrapper[4848]: E0128 13:51:04.889960 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:51:18 crc kubenswrapper[4848]: I0128 13:51:18.850365 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:51:18 crc kubenswrapper[4848]: E0128 13:51:18.851670 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:51:33 crc kubenswrapper[4848]: I0128 13:51:33.850720 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:51:33 crc kubenswrapper[4848]: E0128 13:51:33.851823 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:51:46 crc kubenswrapper[4848]: I0128 13:51:46.851142 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:51:46 crc kubenswrapper[4848]: E0128 13:51:46.852496 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:51:58 crc kubenswrapper[4848]: I0128 13:51:58.851461 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:51:58 crc kubenswrapper[4848]: E0128 13:51:58.852818 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:52:13 crc kubenswrapper[4848]: I0128 13:52:13.851512 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:52:13 crc 
kubenswrapper[4848]: E0128 13:52:13.852403 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:52:26 crc kubenswrapper[4848]: I0128 13:52:26.851358 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:52:26 crc kubenswrapper[4848]: E0128 13:52:26.852635 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:52:39 crc kubenswrapper[4848]: I0128 13:52:39.850962 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:52:39 crc kubenswrapper[4848]: E0128 13:52:39.852348 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:52:50 crc kubenswrapper[4848]: I0128 13:52:50.851207 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:52:50 crc kubenswrapper[4848]: E0128 13:52:50.852803 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:53:03 crc kubenswrapper[4848]: I0128 13:53:03.850744 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:53:03 crc kubenswrapper[4848]: E0128 13:53:03.851998 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:53:15 crc kubenswrapper[4848]: I0128 13:53:15.850644 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:53:15 crc kubenswrapper[4848]: E0128 13:53:15.851744 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:53:26 crc kubenswrapper[4848]: I0128 13:53:26.851384 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:53:26 crc kubenswrapper[4848]: E0128 13:53:26.852731 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:53:38 crc kubenswrapper[4848]: I0128 13:53:38.851608 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:53:38 crc kubenswrapper[4848]: E0128 13:53:38.853058 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:53:49 crc kubenswrapper[4848]: I0128 13:53:49.849852 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:53:49 crc kubenswrapper[4848]: E0128 13:53:49.851265 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:54:03 crc kubenswrapper[4848]: I0128 13:54:03.850460 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:54:03 crc kubenswrapper[4848]: E0128 13:54:03.852159 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:54:15 crc kubenswrapper[4848]: I0128 13:54:15.850655 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:54:15 crc kubenswrapper[4848]: E0128 13:54:15.851614 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:54:29 crc kubenswrapper[4848]: I0128 13:54:29.849932 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:54:29 crc kubenswrapper[4848]: E0128 13:54:29.850852 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:54:41 crc kubenswrapper[4848]: I0128 13:54:41.850479 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:54:41 crc kubenswrapper[4848]: E0128 13:54:41.851390 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:54:55 crc kubenswrapper[4848]: I0128 13:54:55.850655 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:54:55 crc kubenswrapper[4848]: E0128 13:54:55.851483 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:55:09 crc kubenswrapper[4848]: I0128 13:55:09.850425 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:55:09 crc kubenswrapper[4848]: E0128 13:55:09.851207 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:55:21 crc kubenswrapper[4848]: I0128 13:55:21.850461 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:55:21 crc kubenswrapper[4848]: E0128 13:55:21.851537 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.706236 4848 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-prws2"] Jan 28 13:55:28 crc kubenswrapper[4848]: E0128 13:55:28.707394 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="registry-server" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.707416 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="registry-server" Jan 28 13:55:28 crc kubenswrapper[4848]: E0128 13:55:28.707448 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="extract-content" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.707457 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="extract-content" Jan 28 13:55:28 crc kubenswrapper[4848]: E0128 13:55:28.707471 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="extract-utilities" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.707480 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="extract-utilities" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.707765 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f13ae37-5673-4f84-ad92-c96f37f846a2" containerName="registry-server" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.729415 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-prws2"] Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.729568 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.840229 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-utilities\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.840362 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tskjz\" (UniqueName: \"kubernetes.io/projected/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-kube-api-access-tskjz\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.840488 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-catalog-content\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.943082 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tskjz\" (UniqueName: \"kubernetes.io/projected/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-kube-api-access-tskjz\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.943265 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-catalog-content\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.943446 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-utilities\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.943757 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-catalog-content\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:28 crc kubenswrapper[4848]: I0128 13:55:28.943827 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-utilities\") pod \"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:29 crc kubenswrapper[4848]: I0128 13:55:29.379556 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tskjz\" (UniqueName: \"kubernetes.io/projected/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-kube-api-access-tskjz\") pod 
\"redhat-marketplace-prws2\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:29 crc kubenswrapper[4848]: I0128 13:55:29.656041 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:30 crc kubenswrapper[4848]: I0128 13:55:30.196316 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-prws2"] Jan 28 13:55:30 crc kubenswrapper[4848]: I0128 13:55:30.758717 4848 generic.go:334] "Generic (PLEG): container finished" podID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerID="248bfb6c1840c3347c25c4f7aa379c49da511b3cb2fa0c7d51dc06183f28adf4" exitCode=0 Jan 28 13:55:30 crc kubenswrapper[4848]: I0128 13:55:30.758782 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-prws2" event={"ID":"2a45bf0a-b9a4-4087-b272-c2adf38fbee0","Type":"ContainerDied","Data":"248bfb6c1840c3347c25c4f7aa379c49da511b3cb2fa0c7d51dc06183f28adf4"} Jan 28 13:55:30 crc kubenswrapper[4848]: I0128 13:55:30.758825 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-prws2" event={"ID":"2a45bf0a-b9a4-4087-b272-c2adf38fbee0","Type":"ContainerStarted","Data":"4a3669f2cae7bf59c94c5677ea5bd454db5ca9f7813c03184a0f8a648b488b3e"} Jan 28 13:55:30 crc kubenswrapper[4848]: I0128 13:55:30.791241 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:55:32 crc kubenswrapper[4848]: I0128 13:55:32.783788 4848 generic.go:334] "Generic (PLEG): container finished" podID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerID="5ce5ae75439a5799a64876eced34d736923fcf277017808597ea21c5394ad629" exitCode=0 Jan 28 13:55:32 crc kubenswrapper[4848]: I0128 13:55:32.783866 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-prws2" event={"ID":"2a45bf0a-b9a4-4087-b272-c2adf38fbee0","Type":"ContainerDied","Data":"5ce5ae75439a5799a64876eced34d736923fcf277017808597ea21c5394ad629"} Jan 28 13:55:33 crc kubenswrapper[4848]: I0128 13:55:33.800725 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-prws2" event={"ID":"2a45bf0a-b9a4-4087-b272-c2adf38fbee0","Type":"ContainerStarted","Data":"3e1a31b01c4b6ea3007a40af1baf6c0489752174649bc3d3086bbb6e3efe7784"} Jan 28 13:55:33 crc kubenswrapper[4848]: I0128 13:55:33.827103 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-prws2" podStartSLOduration=3.397902905 podStartE2EDuration="5.827076572s" podCreationTimestamp="2026-01-28 13:55:28 +0000 UTC" firstStartedPulling="2026-01-28 13:55:30.790782957 +0000 UTC m=+4157.702999995" lastFinishedPulling="2026-01-28 13:55:33.219956624 +0000 UTC m=+4160.132173662" observedRunningTime="2026-01-28 13:55:33.818279792 +0000 UTC m=+4160.730496870" watchObservedRunningTime="2026-01-28 13:55:33.827076572 +0000 UTC m=+4160.739293630" Jan 28 13:55:34 crc kubenswrapper[4848]: I0128 13:55:34.885086 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:55:34 crc kubenswrapper[4848]: E0128 13:55:34.885598 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 13:55:39 crc kubenswrapper[4848]: I0128 13:55:39.656954 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:39 crc kubenswrapper[4848]: I0128 13:55:39.657634 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:39 crc kubenswrapper[4848]: I0128 13:55:39.708319 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:39 crc kubenswrapper[4848]: I0128 13:55:39.930755 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:40 crc kubenswrapper[4848]: I0128 13:55:40.001263 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-prws2"] Jan 28 13:55:41 crc kubenswrapper[4848]: I0128 13:55:41.898634 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-prws2" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="registry-server" containerID="cri-o://3e1a31b01c4b6ea3007a40af1baf6c0489752174649bc3d3086bbb6e3efe7784" gracePeriod=2 Jan 28 13:55:42 crc kubenswrapper[4848]: I0128 13:55:42.914163 4848 generic.go:334] "Generic (PLEG): container finished" podID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerID="3e1a31b01c4b6ea3007a40af1baf6c0489752174649bc3d3086bbb6e3efe7784" exitCode=0 Jan 28 13:55:42 crc kubenswrapper[4848]: I0128 13:55:42.914257 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-prws2" event={"ID":"2a45bf0a-b9a4-4087-b272-c2adf38fbee0","Type":"ContainerDied","Data":"3e1a31b01c4b6ea3007a40af1baf6c0489752174649bc3d3086bbb6e3efe7784"} Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.202953 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.359685 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-utilities\") pod \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.360012 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tskjz\" (UniqueName: \"kubernetes.io/projected/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-kube-api-access-tskjz\") pod \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.360221 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-catalog-content\") pod \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\" (UID: \"2a45bf0a-b9a4-4087-b272-c2adf38fbee0\") " Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.361674 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-utilities" (OuterVolumeSpecName: "utilities") pod "2a45bf0a-b9a4-4087-b272-c2adf38fbee0" (UID: "2a45bf0a-b9a4-4087-b272-c2adf38fbee0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.367491 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-kube-api-access-tskjz" (OuterVolumeSpecName: "kube-api-access-tskjz") pod "2a45bf0a-b9a4-4087-b272-c2adf38fbee0" (UID: "2a45bf0a-b9a4-4087-b272-c2adf38fbee0"). InnerVolumeSpecName "kube-api-access-tskjz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.384755 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a45bf0a-b9a4-4087-b272-c2adf38fbee0" (UID: "2a45bf0a-b9a4-4087-b272-c2adf38fbee0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.465738 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.465788 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tskjz\" (UniqueName: \"kubernetes.io/projected/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-kube-api-access-tskjz\") on node \"crc\" DevicePath \"\"" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.465805 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a45bf0a-b9a4-4087-b272-c2adf38fbee0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.934803 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-prws2" event={"ID":"2a45bf0a-b9a4-4087-b272-c2adf38fbee0","Type":"ContainerDied","Data":"4a3669f2cae7bf59c94c5677ea5bd454db5ca9f7813c03184a0f8a648b488b3e"} Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.935719 4848 scope.go:117] "RemoveContainer" containerID="3e1a31b01c4b6ea3007a40af1baf6c0489752174649bc3d3086bbb6e3efe7784" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.936076 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-prws2" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.972308 4848 scope.go:117] "RemoveContainer" containerID="5ce5ae75439a5799a64876eced34d736923fcf277017808597ea21c5394ad629" Jan 28 13:55:43 crc kubenswrapper[4848]: I0128 13:55:43.992219 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-prws2"] Jan 28 13:55:44 crc kubenswrapper[4848]: I0128 13:55:44.003756 4848 scope.go:117] "RemoveContainer" containerID="248bfb6c1840c3347c25c4f7aa379c49da511b3cb2fa0c7d51dc06183f28adf4" Jan 28 13:55:44 crc kubenswrapper[4848]: I0128 13:55:44.006565 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-prws2"] Jan 28 13:55:44 crc kubenswrapper[4848]: I0128 13:55:44.868201 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" path="/var/lib/kubelet/pods/2a45bf0a-b9a4-4087-b272-c2adf38fbee0/volumes" Jan 28 13:55:46 crc kubenswrapper[4848]: I0128 13:55:46.850893 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:55:47 crc kubenswrapper[4848]: I0128 13:55:47.994215 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"f3a1613f8da698c24073bc8cf248438a9b8a1a559ea22913495b79daf6463542"} Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.312031 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c986r"] Jan 28 13:56:29 crc kubenswrapper[4848]: E0128 13:56:29.313753 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="extract-utilities" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.313778 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="extract-utilities" Jan 28 13:56:29 crc kubenswrapper[4848]: E0128 13:56:29.313801 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="registry-server" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.313809 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="registry-server" Jan 28 13:56:29 crc kubenswrapper[4848]: E0128 13:56:29.313862 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="extract-content" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.313870 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="extract-content" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.314138 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a45bf0a-b9a4-4087-b272-c2adf38fbee0" containerName="registry-server" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.316402 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.327630 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c986r"] Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.454770 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-catalog-content\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.454876 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-utilities\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.455088 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klkj8\" (UniqueName: \"kubernetes.io/projected/f7042755-0bdc-49d4-a7c5-54b3487555ac-kube-api-access-klkj8\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.558121 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-catalog-content\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.558658 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-utilities\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.558914 4848 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klkj8\" (UniqueName: \"kubernetes.io/projected/f7042755-0bdc-49d4-a7c5-54b3487555ac-kube-api-access-klkj8\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.558955 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-catalog-content\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.559319 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-utilities\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.585395 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klkj8\" (UniqueName: \"kubernetes.io/projected/f7042755-0bdc-49d4-a7c5-54b3487555ac-kube-api-access-klkj8\") pod \"certified-operators-c986r\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:29 crc kubenswrapper[4848]: I0128 13:56:29.677034 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:30 crc kubenswrapper[4848]: I0128 13:56:30.371491 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c986r"] Jan 28 13:56:30 crc kubenswrapper[4848]: I0128 13:56:30.493019 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerStarted","Data":"bee849b76d18df1c8540dfef180ad7f35f29ebce690c995530f9d7c8dcdb3297"} Jan 28 13:56:31 crc kubenswrapper[4848]: I0128 13:56:31.508432 4848 generic.go:334] "Generic (PLEG): container finished" podID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerID="04ae5039987b6ba064f40b2a5efdfb5f91acfaa92c9d7f939c7d0552c0a322cf" exitCode=0 Jan 28 13:56:31 crc kubenswrapper[4848]: I0128 13:56:31.508553 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerDied","Data":"04ae5039987b6ba064f40b2a5efdfb5f91acfaa92c9d7f939c7d0552c0a322cf"} Jan 28 13:56:32 crc kubenswrapper[4848]: I0128 13:56:32.523965 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerStarted","Data":"86a2c6dfecba2f975260c70b63875c7742a97e8d75b9c6a3ad54d87680299f28"} Jan 28 13:56:33 crc kubenswrapper[4848]: I0128 13:56:33.536800 4848 generic.go:334] "Generic (PLEG): container finished" podID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerID="86a2c6dfecba2f975260c70b63875c7742a97e8d75b9c6a3ad54d87680299f28" exitCode=0 Jan 28 13:56:33 crc kubenswrapper[4848]: I0128 13:56:33.536862 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" 
event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerDied","Data":"86a2c6dfecba2f975260c70b63875c7742a97e8d75b9c6a3ad54d87680299f28"} Jan 28 13:56:34 crc kubenswrapper[4848]: I0128 13:56:34.552701 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerStarted","Data":"e0df13ae71098424b1660e8dee3926ee3423c9d07db12e81179694d64b742795"} Jan 28 13:56:34 crc kubenswrapper[4848]: I0128 13:56:34.586719 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c986r" podStartSLOduration=2.964572594 podStartE2EDuration="5.586687551s" podCreationTimestamp="2026-01-28 13:56:29 +0000 UTC" firstStartedPulling="2026-01-28 13:56:31.511157099 +0000 UTC m=+4218.423374137" lastFinishedPulling="2026-01-28 13:56:34.133272056 +0000 UTC m=+4221.045489094" observedRunningTime="2026-01-28 13:56:34.575387083 +0000 UTC m=+4221.487604121" watchObservedRunningTime="2026-01-28 13:56:34.586687551 +0000 UTC m=+4221.498904589" Jan 28 13:56:39 crc kubenswrapper[4848]: I0128 13:56:39.677556 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:39 crc kubenswrapper[4848]: I0128 13:56:39.678587 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:40 crc kubenswrapper[4848]: I0128 13:56:40.231764 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:40 crc kubenswrapper[4848]: I0128 13:56:40.681770 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:40 crc kubenswrapper[4848]: I0128 13:56:40.760739 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c986r"] Jan 28 13:56:42 crc kubenswrapper[4848]: I0128 13:56:42.653703 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c986r" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="registry-server" containerID="cri-o://e0df13ae71098424b1660e8dee3926ee3423c9d07db12e81179694d64b742795" gracePeriod=2 Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.685810 4848 generic.go:334] "Generic (PLEG): container finished" podID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerID="e0df13ae71098424b1660e8dee3926ee3423c9d07db12e81179694d64b742795" exitCode=0 Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.685959 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerDied","Data":"e0df13ae71098424b1660e8dee3926ee3423c9d07db12e81179694d64b742795"} Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.686760 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c986r" event={"ID":"f7042755-0bdc-49d4-a7c5-54b3487555ac","Type":"ContainerDied","Data":"bee849b76d18df1c8540dfef180ad7f35f29ebce690c995530f9d7c8dcdb3297"} Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.686791 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bee849b76d18df1c8540dfef180ad7f35f29ebce690c995530f9d7c8dcdb3297" Jan 28 13:56:43 crc 
kubenswrapper[4848]: I0128 13:56:43.721821 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.881322 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klkj8\" (UniqueName: \"kubernetes.io/projected/f7042755-0bdc-49d4-a7c5-54b3487555ac-kube-api-access-klkj8\") pod \"f7042755-0bdc-49d4-a7c5-54b3487555ac\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.881393 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-utilities\") pod \"f7042755-0bdc-49d4-a7c5-54b3487555ac\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.881433 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-catalog-content\") pod \"f7042755-0bdc-49d4-a7c5-54b3487555ac\" (UID: \"f7042755-0bdc-49d4-a7c5-54b3487555ac\") " Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.882876 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-utilities" (OuterVolumeSpecName: "utilities") pod "f7042755-0bdc-49d4-a7c5-54b3487555ac" (UID: "f7042755-0bdc-49d4-a7c5-54b3487555ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.890716 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7042755-0bdc-49d4-a7c5-54b3487555ac-kube-api-access-klkj8" (OuterVolumeSpecName: "kube-api-access-klkj8") pod "f7042755-0bdc-49d4-a7c5-54b3487555ac" (UID: "f7042755-0bdc-49d4-a7c5-54b3487555ac"). InnerVolumeSpecName "kube-api-access-klkj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.953840 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7042755-0bdc-49d4-a7c5-54b3487555ac" (UID: "f7042755-0bdc-49d4-a7c5-54b3487555ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.984545 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klkj8\" (UniqueName: \"kubernetes.io/projected/f7042755-0bdc-49d4-a7c5-54b3487555ac-kube-api-access-klkj8\") on node \"crc\" DevicePath \"\"" Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.984585 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:56:43 crc kubenswrapper[4848]: I0128 13:56:43.984595 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7042755-0bdc-49d4-a7c5-54b3487555ac-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:56:44 crc kubenswrapper[4848]: I0128 13:56:44.702564 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c986r" Jan 28 13:56:44 crc kubenswrapper[4848]: I0128 13:56:44.760106 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c986r"] Jan 28 13:56:44 crc kubenswrapper[4848]: I0128 13:56:44.774045 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c986r"] Jan 28 13:56:44 crc kubenswrapper[4848]: I0128 13:56:44.865042 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" path="/var/lib/kubelet/pods/f7042755-0bdc-49d4-a7c5-54b3487555ac/volumes" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.372223 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fr76g"] Jan 28 13:56:58 crc kubenswrapper[4848]: E0128 13:56:58.373709 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="extract-content" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.373728 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="extract-content" Jan 28 13:56:58 crc kubenswrapper[4848]: E0128 13:56:58.373774 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="registry-server" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.373781 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="registry-server" Jan 28 13:56:58 crc kubenswrapper[4848]: E0128 13:56:58.373808 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="extract-utilities" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.373815 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="extract-utilities" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.374061 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7042755-0bdc-49d4-a7c5-54b3487555ac" containerName="registry-server" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.375854 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.395020 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fr76g"] Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.532550 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-catalog-content\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.532693 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ddfh\" (UniqueName: \"kubernetes.io/projected/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-kube-api-access-2ddfh\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.532864 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-utilities\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.635354 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-catalog-content\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.635457 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ddfh\" (UniqueName: \"kubernetes.io/projected/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-kube-api-access-2ddfh\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.635595 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-utilities\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.636468 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-utilities\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.636587 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-catalog-content\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.664531 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2ddfh\" (UniqueName: \"kubernetes.io/projected/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-kube-api-access-2ddfh\") pod \"redhat-operators-fr76g\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:58 crc kubenswrapper[4848]: I0128 13:56:58.713601 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:56:59 crc kubenswrapper[4848]: I0128 13:56:59.398846 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fr76g"] Jan 28 13:56:59 crc kubenswrapper[4848]: I0128 13:56:59.904905 4848 generic.go:334] "Generic (PLEG): container finished" podID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerID="a1713ae97c7be962d729889279e0d386697c6ff5fc3ab7f976b0589bc534e789" exitCode=0 Jan 28 13:56:59 crc kubenswrapper[4848]: I0128 13:56:59.905029 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerDied","Data":"a1713ae97c7be962d729889279e0d386697c6ff5fc3ab7f976b0589bc534e789"} Jan 28 13:56:59 crc kubenswrapper[4848]: I0128 13:56:59.905483 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerStarted","Data":"a2c2a0b7a0f3442efaabfc06fa105611e27c3a87be180394830b8e5fb60c07bf"} Jan 28 13:57:01 crc kubenswrapper[4848]: I0128 13:57:01.932110 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerStarted","Data":"944d8c2606f53124f7150afd976e176f35abc241645c96299d26dc10b5811bc5"} Jan 28 13:57:09 crc kubenswrapper[4848]: I0128 13:57:09.014119 4848 generic.go:334] "Generic (PLEG): container finished" podID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerID="944d8c2606f53124f7150afd976e176f35abc241645c96299d26dc10b5811bc5" exitCode=0 Jan 28 13:57:09 crc kubenswrapper[4848]: I0128 13:57:09.014290 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerDied","Data":"944d8c2606f53124f7150afd976e176f35abc241645c96299d26dc10b5811bc5"} Jan 28 13:57:11 crc kubenswrapper[4848]: I0128 13:57:11.041902 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerStarted","Data":"f8d4835781e367a7a000817272537099755cf350e859872a2cfdc3a5b438535a"} Jan 28 13:57:11 crc kubenswrapper[4848]: I0128 13:57:11.069122 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fr76g" podStartSLOduration=3.205348692 podStartE2EDuration="13.069092728s" podCreationTimestamp="2026-01-28 13:56:58 +0000 UTC" firstStartedPulling="2026-01-28 13:56:59.907522749 +0000 UTC m=+4246.819739777" lastFinishedPulling="2026-01-28 13:57:09.771266775 +0000 UTC m=+4256.683483813" observedRunningTime="2026-01-28 13:57:11.067136314 +0000 UTC m=+4257.979353362" watchObservedRunningTime="2026-01-28 13:57:11.069092728 +0000 UTC m=+4257.981309766" Jan 28 13:57:18 crc kubenswrapper[4848]: I0128 13:57:18.715139 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fr76g" 
Jan 28 13:57:18 crc kubenswrapper[4848]: I0128 13:57:18.716145 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:57:19 crc kubenswrapper[4848]: I0128 13:57:19.783136 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fr76g" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="registry-server" probeResult="failure" output=< Jan 28 13:57:19 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 13:57:19 crc kubenswrapper[4848]: > Jan 28 13:57:29 crc kubenswrapper[4848]: I0128 13:57:29.767434 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fr76g" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="registry-server" probeResult="failure" output=< Jan 28 13:57:29 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 13:57:29 crc kubenswrapper[4848]: > Jan 28 13:57:38 crc kubenswrapper[4848]: I0128 13:57:38.809608 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:57:38 crc kubenswrapper[4848]: I0128 13:57:38.870730 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:57:39 crc kubenswrapper[4848]: I0128 13:57:39.060467 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fr76g"] Jan 28 13:57:40 crc kubenswrapper[4848]: I0128 13:57:40.501352 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fr76g" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="registry-server" containerID="cri-o://f8d4835781e367a7a000817272537099755cf350e859872a2cfdc3a5b438535a" gracePeriod=2 Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.609432 4848 generic.go:334] "Generic (PLEG): container finished" podID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerID="f8d4835781e367a7a000817272537099755cf350e859872a2cfdc3a5b438535a" exitCode=0 Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.609932 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerDied","Data":"f8d4835781e367a7a000817272537099755cf350e859872a2cfdc3a5b438535a"} Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.816290 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.954975 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-catalog-content\") pod \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.955078 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-utilities\") pod \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.955297 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ddfh\" (UniqueName: \"kubernetes.io/projected/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-kube-api-access-2ddfh\") pod \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\" (UID: \"2a214ebd-e8ab-4d3e-9da2-439e944a8db3\") " Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.959266 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-utilities" (OuterVolumeSpecName: "utilities") pod "2a214ebd-e8ab-4d3e-9da2-439e944a8db3" (UID: "2a214ebd-e8ab-4d3e-9da2-439e944a8db3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:57:41 crc kubenswrapper[4848]: I0128 13:57:41.971703 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-kube-api-access-2ddfh" (OuterVolumeSpecName: "kube-api-access-2ddfh") pod "2a214ebd-e8ab-4d3e-9da2-439e944a8db3" (UID: "2a214ebd-e8ab-4d3e-9da2-439e944a8db3"). InnerVolumeSpecName "kube-api-access-2ddfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.058536 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ddfh\" (UniqueName: \"kubernetes.io/projected/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-kube-api-access-2ddfh\") on node \"crc\" DevicePath \"\"" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.059057 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.117768 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a214ebd-e8ab-4d3e-9da2-439e944a8db3" (UID: "2a214ebd-e8ab-4d3e-9da2-439e944a8db3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.161962 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a214ebd-e8ab-4d3e-9da2-439e944a8db3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.631010 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fr76g" event={"ID":"2a214ebd-e8ab-4d3e-9da2-439e944a8db3","Type":"ContainerDied","Data":"a2c2a0b7a0f3442efaabfc06fa105611e27c3a87be180394830b8e5fb60c07bf"} Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.631107 4848 scope.go:117] "RemoveContainer" containerID="f8d4835781e367a7a000817272537099755cf350e859872a2cfdc3a5b438535a" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.631188 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fr76g" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.658186 4848 scope.go:117] "RemoveContainer" containerID="944d8c2606f53124f7150afd976e176f35abc241645c96299d26dc10b5811bc5" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.692069 4848 scope.go:117] "RemoveContainer" containerID="a1713ae97c7be962d729889279e0d386697c6ff5fc3ab7f976b0589bc534e789" Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.818519 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fr76g"] Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.830557 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fr76g"] Jan 28 13:57:42 crc kubenswrapper[4848]: I0128 13:57:42.867220 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" path="/var/lib/kubelet/pods/2a214ebd-e8ab-4d3e-9da2-439e944a8db3/volumes" Jan 28 13:58:07 crc kubenswrapper[4848]: I0128 13:58:07.924173 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:58:07 crc kubenswrapper[4848]: I0128 13:58:07.926628 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:58:37 crc kubenswrapper[4848]: I0128 13:58:37.924311 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:58:37 crc kubenswrapper[4848]: I0128 13:58:37.925041 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:59:07 crc kubenswrapper[4848]: I0128 13:59:07.924638 4848 patch_prober.go:28] 
interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 13:59:07 crc kubenswrapper[4848]: I0128 13:59:07.925441 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 13:59:07 crc kubenswrapper[4848]: I0128 13:59:07.925495 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 13:59:07 crc kubenswrapper[4848]: I0128 13:59:07.926502 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f3a1613f8da698c24073bc8cf248438a9b8a1a559ea22913495b79daf6463542"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 13:59:07 crc kubenswrapper[4848]: I0128 13:59:07.926570 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://f3a1613f8da698c24073bc8cf248438a9b8a1a559ea22913495b79daf6463542" gracePeriod=600 Jan 28 13:59:08 crc kubenswrapper[4848]: I0128 13:59:08.689198 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="f3a1613f8da698c24073bc8cf248438a9b8a1a559ea22913495b79daf6463542" exitCode=0 Jan 28 13:59:08 crc kubenswrapper[4848]: I0128 13:59:08.689872 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"f3a1613f8da698c24073bc8cf248438a9b8a1a559ea22913495b79daf6463542"} Jan 28 13:59:08 crc kubenswrapper[4848]: I0128 13:59:08.689929 4848 scope.go:117] "RemoveContainer" containerID="6f87de2a704f3c17b25fa0af665dc1e4798126615ca62f82a1e20aa4bd80c6f8" Jan 28 13:59:09 crc kubenswrapper[4848]: I0128 13:59:09.703881 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab"} Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.250729 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz"] Jan 28 14:00:00 crc kubenswrapper[4848]: E0128 14:00:00.252467 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="extract-utilities" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.252489 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="extract-utilities" Jan 28 14:00:00 crc kubenswrapper[4848]: E0128 14:00:00.252512 4848 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="registry-server" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.252521 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="registry-server" Jan 28 14:00:00 crc kubenswrapper[4848]: E0128 14:00:00.252560 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="extract-content" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.252573 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="extract-content" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.252856 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a214ebd-e8ab-4d3e-9da2-439e944a8db3" containerName="registry-server" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.254033 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.256677 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.258229 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.283024 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz"] Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.353656 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zjcp\" (UniqueName: \"kubernetes.io/projected/357aa36c-134b-431b-b041-5f04284770d0-kube-api-access-7zjcp\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.353818 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/357aa36c-134b-431b-b041-5f04284770d0-secret-volume\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.353847 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/357aa36c-134b-431b-b041-5f04284770d0-config-volume\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.456812 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/357aa36c-134b-431b-b041-5f04284770d0-secret-volume\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.457015 4848 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/357aa36c-134b-431b-b041-5f04284770d0-config-volume\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.457210 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zjcp\" (UniqueName: \"kubernetes.io/projected/357aa36c-134b-431b-b041-5f04284770d0-kube-api-access-7zjcp\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.458419 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/357aa36c-134b-431b-b041-5f04284770d0-config-volume\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.467189 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/357aa36c-134b-431b-b041-5f04284770d0-secret-volume\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.480948 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zjcp\" (UniqueName: \"kubernetes.io/projected/357aa36c-134b-431b-b041-5f04284770d0-kube-api-access-7zjcp\") pod \"collect-profiles-29493480-c4bmz\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:00 crc kubenswrapper[4848]: I0128 14:00:00.584928 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:01 crc kubenswrapper[4848]: I0128 14:00:01.151939 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz"] Jan 28 14:00:01 crc kubenswrapper[4848]: I0128 14:00:01.304837 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" event={"ID":"357aa36c-134b-431b-b041-5f04284770d0","Type":"ContainerStarted","Data":"981728a52b2d5fef9d3f0700fd3eecc042655986470f78116847010c70f57fd2"} Jan 28 14:00:02 crc kubenswrapper[4848]: I0128 14:00:02.326631 4848 generic.go:334] "Generic (PLEG): container finished" podID="357aa36c-134b-431b-b041-5f04284770d0" containerID="7bb417d16cf5331595b8200c90138283c8e98d20ec52e7116aefa87dbdf9d0fe" exitCode=0 Jan 28 14:00:02 crc kubenswrapper[4848]: I0128 14:00:02.326768 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" event={"ID":"357aa36c-134b-431b-b041-5f04284770d0","Type":"ContainerDied","Data":"7bb417d16cf5331595b8200c90138283c8e98d20ec52e7116aefa87dbdf9d0fe"} Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.755324 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.883036 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/357aa36c-134b-431b-b041-5f04284770d0-config-volume\") pod \"357aa36c-134b-431b-b041-5f04284770d0\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.883212 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/357aa36c-134b-431b-b041-5f04284770d0-secret-volume\") pod \"357aa36c-134b-431b-b041-5f04284770d0\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.883472 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zjcp\" (UniqueName: \"kubernetes.io/projected/357aa36c-134b-431b-b041-5f04284770d0-kube-api-access-7zjcp\") pod \"357aa36c-134b-431b-b041-5f04284770d0\" (UID: \"357aa36c-134b-431b-b041-5f04284770d0\") " Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.884224 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/357aa36c-134b-431b-b041-5f04284770d0-config-volume" (OuterVolumeSpecName: "config-volume") pod "357aa36c-134b-431b-b041-5f04284770d0" (UID: "357aa36c-134b-431b-b041-5f04284770d0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.895461 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357aa36c-134b-431b-b041-5f04284770d0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "357aa36c-134b-431b-b041-5f04284770d0" (UID: "357aa36c-134b-431b-b041-5f04284770d0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.895643 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/357aa36c-134b-431b-b041-5f04284770d0-kube-api-access-7zjcp" (OuterVolumeSpecName: "kube-api-access-7zjcp") pod "357aa36c-134b-431b-b041-5f04284770d0" (UID: "357aa36c-134b-431b-b041-5f04284770d0"). InnerVolumeSpecName "kube-api-access-7zjcp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.986995 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/357aa36c-134b-431b-b041-5f04284770d0-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.987058 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/357aa36c-134b-431b-b041-5f04284770d0-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:00:03 crc kubenswrapper[4848]: I0128 14:00:03.987079 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zjcp\" (UniqueName: \"kubernetes.io/projected/357aa36c-134b-431b-b041-5f04284770d0-kube-api-access-7zjcp\") on node \"crc\" DevicePath \"\"" Jan 28 14:00:04 crc kubenswrapper[4848]: I0128 14:00:04.355326 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" event={"ID":"357aa36c-134b-431b-b041-5f04284770d0","Type":"ContainerDied","Data":"981728a52b2d5fef9d3f0700fd3eecc042655986470f78116847010c70f57fd2"} Jan 28 14:00:04 crc kubenswrapper[4848]: I0128 14:00:04.355398 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz" Jan 28 14:00:04 crc kubenswrapper[4848]: I0128 14:00:04.355402 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="981728a52b2d5fef9d3f0700fd3eecc042655986470f78116847010c70f57fd2" Jan 28 14:00:04 crc kubenswrapper[4848]: I0128 14:00:04.844817 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n"] Jan 28 14:00:04 crc kubenswrapper[4848]: I0128 14:00:04.873089 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493435-ch48n"] Jan 28 14:00:06 crc kubenswrapper[4848]: I0128 14:00:06.865705 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="262cf1c7-aa29-451d-b27c-8df1174110f1" path="/var/lib/kubelet/pods/262cf1c7-aa29-451d-b27c-8df1174110f1/volumes" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.646041 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pf5b9"] Jan 28 14:00:08 crc kubenswrapper[4848]: E0128 14:00:08.647190 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="357aa36c-134b-431b-b041-5f04284770d0" containerName="collect-profiles" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.647210 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="357aa36c-134b-431b-b041-5f04284770d0" containerName="collect-profiles" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.647497 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="357aa36c-134b-431b-b041-5f04284770d0" containerName="collect-profiles" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.649782 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.665129 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pf5b9"] Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.808913 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-catalog-content\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.809008 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzq9w\" (UniqueName: \"kubernetes.io/projected/661e0c1b-d836-4803-ac74-3b559e0d046d-kube-api-access-dzq9w\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.809984 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-utilities\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.912611 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-utilities\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.912736 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-catalog-content\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.912805 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzq9w\" (UniqueName: \"kubernetes.io/projected/661e0c1b-d836-4803-ac74-3b559e0d046d-kube-api-access-dzq9w\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.913266 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-catalog-content\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.913530 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-utilities\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.935175 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dzq9w\" (UniqueName: \"kubernetes.io/projected/661e0c1b-d836-4803-ac74-3b559e0d046d-kube-api-access-dzq9w\") pod \"community-operators-pf5b9\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:08 crc kubenswrapper[4848]: I0128 14:00:08.975118 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:09 crc kubenswrapper[4848]: I0128 14:00:09.607414 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pf5b9"] Jan 28 14:00:10 crc kubenswrapper[4848]: I0128 14:00:10.432812 4848 generic.go:334] "Generic (PLEG): container finished" podID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerID="ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0" exitCode=0 Jan 28 14:00:10 crc kubenswrapper[4848]: I0128 14:00:10.432939 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerDied","Data":"ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0"} Jan 28 14:00:10 crc kubenswrapper[4848]: I0128 14:00:10.433161 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerStarted","Data":"11e13cd16f13e4763f05b21b9d80a0b61d1cf07ca925429cd0dbf301eab182d7"} Jan 28 14:00:13 crc kubenswrapper[4848]: I0128 14:00:13.467971 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerStarted","Data":"d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f"} Jan 28 14:00:18 crc kubenswrapper[4848]: I0128 14:00:18.532000 4848 generic.go:334] "Generic (PLEG): container finished" podID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerID="d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f" exitCode=0 Jan 28 14:00:18 crc kubenswrapper[4848]: I0128 14:00:18.532709 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerDied","Data":"d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f"} Jan 28 14:00:22 crc kubenswrapper[4848]: I0128 14:00:22.581391 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerStarted","Data":"e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852"} Jan 28 14:00:22 crc kubenswrapper[4848]: I0128 14:00:22.615826 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pf5b9" podStartSLOduration=3.753050909 podStartE2EDuration="14.615797605s" podCreationTimestamp="2026-01-28 14:00:08 +0000 UTC" firstStartedPulling="2026-01-28 14:00:10.434761558 +0000 UTC m=+4437.346978596" lastFinishedPulling="2026-01-28 14:00:21.297508254 +0000 UTC m=+4448.209725292" observedRunningTime="2026-01-28 14:00:22.604306182 +0000 UTC m=+4449.516523260" watchObservedRunningTime="2026-01-28 14:00:22.615797605 +0000 UTC m=+4449.528014643" Jan 28 14:00:28 crc kubenswrapper[4848]: I0128 14:00:28.976447 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:28 crc kubenswrapper[4848]: I0128 14:00:28.977001 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:29 crc kubenswrapper[4848]: I0128 14:00:29.033913 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:29 crc kubenswrapper[4848]: I0128 14:00:29.703474 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:29 crc kubenswrapper[4848]: I0128 14:00:29.766134 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pf5b9"] Jan 28 14:00:31 crc kubenswrapper[4848]: I0128 14:00:31.671810 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pf5b9" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="registry-server" containerID="cri-o://e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852" gracePeriod=2 Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.190166 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.291863 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-utilities\") pod \"661e0c1b-d836-4803-ac74-3b559e0d046d\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.292304 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-catalog-content\") pod \"661e0c1b-d836-4803-ac74-3b559e0d046d\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.292372 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzq9w\" (UniqueName: \"kubernetes.io/projected/661e0c1b-d836-4803-ac74-3b559e0d046d-kube-api-access-dzq9w\") pod \"661e0c1b-d836-4803-ac74-3b559e0d046d\" (UID: \"661e0c1b-d836-4803-ac74-3b559e0d046d\") " Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.293279 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-utilities" (OuterVolumeSpecName: "utilities") pod "661e0c1b-d836-4803-ac74-3b559e0d046d" (UID: "661e0c1b-d836-4803-ac74-3b559e0d046d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.298243 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/661e0c1b-d836-4803-ac74-3b559e0d046d-kube-api-access-dzq9w" (OuterVolumeSpecName: "kube-api-access-dzq9w") pod "661e0c1b-d836-4803-ac74-3b559e0d046d" (UID: "661e0c1b-d836-4803-ac74-3b559e0d046d"). InnerVolumeSpecName "kube-api-access-dzq9w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.349665 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "661e0c1b-d836-4803-ac74-3b559e0d046d" (UID: "661e0c1b-d836-4803-ac74-3b559e0d046d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.395204 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.395239 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzq9w\" (UniqueName: \"kubernetes.io/projected/661e0c1b-d836-4803-ac74-3b559e0d046d-kube-api-access-dzq9w\") on node \"crc\" DevicePath \"\"" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.395270 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/661e0c1b-d836-4803-ac74-3b559e0d046d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.689681 4848 generic.go:334] "Generic (PLEG): container finished" podID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerID="e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852" exitCode=0 Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.689770 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerDied","Data":"e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852"} Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.689791 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pf5b9" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.689969 4848 scope.go:117] "RemoveContainer" containerID="e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.689951 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pf5b9" event={"ID":"661e0c1b-d836-4803-ac74-3b559e0d046d","Type":"ContainerDied","Data":"11e13cd16f13e4763f05b21b9d80a0b61d1cf07ca925429cd0dbf301eab182d7"} Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.711834 4848 scope.go:117] "RemoveContainer" containerID="d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f" Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.744787 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pf5b9"] Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.762841 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pf5b9"] Jan 28 14:00:32 crc kubenswrapper[4848]: I0128 14:00:32.865844 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" path="/var/lib/kubelet/pods/661e0c1b-d836-4803-ac74-3b559e0d046d/volumes" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.199363 4848 scope.go:117] "RemoveContainer" containerID="ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.240504 4848 scope.go:117] "RemoveContainer" containerID="e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852" Jan 28 14:00:33 crc kubenswrapper[4848]: E0128 14:00:33.241066 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852\": container with ID starting with e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852 not found: ID does not exist" containerID="e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.241103 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852"} err="failed to get container status \"e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852\": rpc error: code = NotFound desc = could not find container \"e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852\": container with ID starting with e51f0dbac3cea9c2e1ce0d7dda7f9d41495b40a3eef756d3a44da1172a89d852 not found: ID does not exist" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.241130 4848 scope.go:117] "RemoveContainer" containerID="d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f" Jan 28 14:00:33 crc kubenswrapper[4848]: E0128 14:00:33.241450 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f\": container with ID starting with d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f not found: ID does not exist" containerID="d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.241473 4848 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f"} err="failed to get container status \"d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f\": rpc error: code = NotFound desc = could not find container \"d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f\": container with ID starting with d6d633719c70124cf1726c5b1dcb3020517ec1db2c3c0bd1f8458b75b5f3947f not found: ID does not exist" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.241487 4848 scope.go:117] "RemoveContainer" containerID="ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0" Jan 28 14:00:33 crc kubenswrapper[4848]: E0128 14:00:33.241810 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0\": container with ID starting with ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0 not found: ID does not exist" containerID="ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0" Jan 28 14:00:33 crc kubenswrapper[4848]: I0128 14:00:33.241846 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0"} err="failed to get container status \"ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0\": rpc error: code = NotFound desc = could not find container \"ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0\": container with ID starting with ee8aa028f7010cb7b55e41c81ff4c532a4dafff5980ad703b4b4d8ea9f984dc0 not found: ID does not exist" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.172850 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29493481-q89gd"] Jan 28 14:01:00 crc kubenswrapper[4848]: E0128 14:01:00.174065 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="extract-content" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.174087 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="extract-content" Jan 28 14:01:00 crc kubenswrapper[4848]: E0128 14:01:00.174109 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="registry-server" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.174119 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="registry-server" Jan 28 14:01:00 crc kubenswrapper[4848]: E0128 14:01:00.174136 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="extract-utilities" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.174145 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="extract-utilities" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.174404 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="661e0c1b-d836-4803-ac74-3b559e0d046d" containerName="registry-server" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.175395 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.186212 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493481-q89gd"] Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.268352 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk5c4\" (UniqueName: \"kubernetes.io/projected/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-kube-api-access-hk5c4\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.268965 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-combined-ca-bundle\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.269049 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-config-data\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.269168 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-fernet-keys\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.372051 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-fernet-keys\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.372145 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk5c4\" (UniqueName: \"kubernetes.io/projected/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-kube-api-access-hk5c4\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.372288 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-combined-ca-bundle\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.372337 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-config-data\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.380122 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-fernet-keys\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.380206 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-combined-ca-bundle\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.392542 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-config-data\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.394502 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk5c4\" (UniqueName: \"kubernetes.io/projected/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-kube-api-access-hk5c4\") pod \"keystone-cron-29493481-q89gd\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:00 crc kubenswrapper[4848]: I0128 14:01:00.539129 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:01 crc kubenswrapper[4848]: I0128 14:01:01.040568 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493481-q89gd"] Jan 28 14:01:02 crc kubenswrapper[4848]: I0128 14:01:02.010113 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493481-q89gd" event={"ID":"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502","Type":"ContainerStarted","Data":"654eca6fc720c3a7c434745bbf1abf835dd61484b095557bdce033ed3271c5b1"} Jan 28 14:01:02 crc kubenswrapper[4848]: I0128 14:01:02.010556 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493481-q89gd" event={"ID":"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502","Type":"ContainerStarted","Data":"a979a854d8a82526a7e0e2bd81b9b326254fa17a640b75de0aa033c19aa7a7f3"} Jan 28 14:01:02 crc kubenswrapper[4848]: I0128 14:01:02.047874 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29493481-q89gd" podStartSLOduration=2.047843393 podStartE2EDuration="2.047843393s" podCreationTimestamp="2026-01-28 14:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 14:01:02.038157968 +0000 UTC m=+4488.950375046" watchObservedRunningTime="2026-01-28 14:01:02.047843393 +0000 UTC m=+4488.960060451" Jan 28 14:01:05 crc kubenswrapper[4848]: E0128 14:01:05.889592 4848 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b8bd23c_f6a7_4d2d_9d6d_86b5eae94502.slice/crio-conmon-654eca6fc720c3a7c434745bbf1abf835dd61484b095557bdce033ed3271c5b1.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b8bd23c_f6a7_4d2d_9d6d_86b5eae94502.slice/crio-654eca6fc720c3a7c434745bbf1abf835dd61484b095557bdce033ed3271c5b1.scope\": RecentStats: unable to find data in memory cache]" Jan 28 14:01:06 crc kubenswrapper[4848]: I0128 14:01:06.060459 4848 generic.go:334] "Generic (PLEG): container finished" podID="7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" containerID="654eca6fc720c3a7c434745bbf1abf835dd61484b095557bdce033ed3271c5b1" exitCode=0 Jan 28 14:01:06 crc kubenswrapper[4848]: I0128 14:01:06.060578 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493481-q89gd" event={"ID":"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502","Type":"ContainerDied","Data":"654eca6fc720c3a7c434745bbf1abf835dd61484b095557bdce033ed3271c5b1"} Jan 28 14:01:06 crc kubenswrapper[4848]: I0128 14:01:06.844390 4848 scope.go:117] "RemoveContainer" containerID="ccc60d17a7aa56bf24a87dc8495b8207b9aae3906a76f2a7f744e56cddf103f9" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.451209 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.609364 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk5c4\" (UniqueName: \"kubernetes.io/projected/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-kube-api-access-hk5c4\") pod \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.609447 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-config-data\") pod \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.609510 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-combined-ca-bundle\") pod \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.609618 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-fernet-keys\") pod \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\" (UID: \"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502\") " Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.616652 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" (UID: "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.619484 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-kube-api-access-hk5c4" (OuterVolumeSpecName: "kube-api-access-hk5c4") pod "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" (UID: "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502"). InnerVolumeSpecName "kube-api-access-hk5c4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.646988 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" (UID: "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.682776 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-config-data" (OuterVolumeSpecName: "config-data") pod "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" (UID: "7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.713168 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk5c4\" (UniqueName: \"kubernetes.io/projected/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-kube-api-access-hk5c4\") on node \"crc\" DevicePath \"\"" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.713219 4848 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.713234 4848 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 14:01:07 crc kubenswrapper[4848]: I0128 14:01:07.713259 4848 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 14:01:08 crc kubenswrapper[4848]: I0128 14:01:08.083622 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493481-q89gd" event={"ID":"7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502","Type":"ContainerDied","Data":"a979a854d8a82526a7e0e2bd81b9b326254fa17a640b75de0aa033c19aa7a7f3"} Jan 28 14:01:08 crc kubenswrapper[4848]: I0128 14:01:08.083670 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a979a854d8a82526a7e0e2bd81b9b326254fa17a640b75de0aa033c19aa7a7f3" Jan 28 14:01:08 crc kubenswrapper[4848]: I0128 14:01:08.083683 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29493481-q89gd" Jan 28 14:01:37 crc kubenswrapper[4848]: I0128 14:01:37.925198 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:01:37 crc kubenswrapper[4848]: I0128 14:01:37.926838 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:02:07 crc kubenswrapper[4848]: I0128 14:02:07.924690 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:02:07 crc kubenswrapper[4848]: I0128 14:02:07.925609 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:02:37 crc kubenswrapper[4848]: I0128 14:02:37.924634 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:02:37 crc kubenswrapper[4848]: I0128 14:02:37.925516 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:02:37 crc kubenswrapper[4848]: I0128 14:02:37.925572 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:02:37 crc kubenswrapper[4848]: I0128 14:02:37.926433 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:02:37 crc kubenswrapper[4848]: I0128 14:02:37.926486 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" gracePeriod=600 Jan 28 14:02:38 crc kubenswrapper[4848]: E0128 14:02:38.186687 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:02:38 crc kubenswrapper[4848]: I0128 14:02:38.192853 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" exitCode=0 Jan 28 14:02:38 crc kubenswrapper[4848]: I0128 14:02:38.192918 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab"} Jan 28 14:02:38 crc kubenswrapper[4848]: I0128 14:02:38.192992 4848 scope.go:117] "RemoveContainer" containerID="f3a1613f8da698c24073bc8cf248438a9b8a1a559ea22913495b79daf6463542" Jan 28 14:02:38 crc kubenswrapper[4848]: I0128 14:02:38.193913 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:02:38 crc kubenswrapper[4848]: E0128 14:02:38.194314 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:02:52 crc kubenswrapper[4848]: I0128 14:02:52.850898 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:02:52 crc kubenswrapper[4848]: E0128 14:02:52.853896 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:03:04 crc kubenswrapper[4848]: I0128 14:03:04.858773 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:03:04 crc kubenswrapper[4848]: E0128 14:03:04.859587 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:03:06 crc kubenswrapper[4848]: I0128 14:03:06.958532 4848 scope.go:117] "RemoveContainer" containerID="04ae5039987b6ba064f40b2a5efdfb5f91acfaa92c9d7f939c7d0552c0a322cf" Jan 28 14:03:06 crc kubenswrapper[4848]: I0128 14:03:06.988674 4848 scope.go:117] "RemoveContainer" containerID="e0df13ae71098424b1660e8dee3926ee3423c9d07db12e81179694d64b742795" Jan 28 14:03:07 crc kubenswrapper[4848]: I0128 14:03:07.055547 4848 scope.go:117] "RemoveContainer" 
containerID="86a2c6dfecba2f975260c70b63875c7742a97e8d75b9c6a3ad54d87680299f28" Jan 28 14:03:17 crc kubenswrapper[4848]: I0128 14:03:17.852121 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:03:17 crc kubenswrapper[4848]: E0128 14:03:17.853728 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:03:28 crc kubenswrapper[4848]: I0128 14:03:28.853901 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:03:28 crc kubenswrapper[4848]: E0128 14:03:28.855047 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:03:43 crc kubenswrapper[4848]: I0128 14:03:43.851726 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:03:43 crc kubenswrapper[4848]: E0128 14:03:43.853077 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:03:58 crc kubenswrapper[4848]: I0128 14:03:58.851387 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:03:58 crc kubenswrapper[4848]: E0128 14:03:58.852922 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:04:13 crc kubenswrapper[4848]: I0128 14:04:13.851307 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:04:13 crc kubenswrapper[4848]: E0128 14:04:13.853756 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:04:28 crc kubenswrapper[4848]: I0128 14:04:28.850402 4848 scope.go:117] "RemoveContainer" 
containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:04:28 crc kubenswrapper[4848]: E0128 14:04:28.851388 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:04:42 crc kubenswrapper[4848]: I0128 14:04:42.851051 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:04:42 crc kubenswrapper[4848]: E0128 14:04:42.851864 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:04:54 crc kubenswrapper[4848]: I0128 14:04:54.860051 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:04:54 crc kubenswrapper[4848]: E0128 14:04:54.861590 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:05:06 crc kubenswrapper[4848]: I0128 14:05:06.850676 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:05:06 crc kubenswrapper[4848]: E0128 14:05:06.852033 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:05:19 crc kubenswrapper[4848]: I0128 14:05:19.850200 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:05:19 crc kubenswrapper[4848]: E0128 14:05:19.851379 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:05:31 crc kubenswrapper[4848]: I0128 14:05:31.851671 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:05:31 crc kubenswrapper[4848]: E0128 14:05:31.852786 4848 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:05:42 crc kubenswrapper[4848]: I0128 14:05:42.850814 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:05:42 crc kubenswrapper[4848]: E0128 14:05:42.851532 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:05:56 crc kubenswrapper[4848]: I0128 14:05:56.850998 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:05:56 crc kubenswrapper[4848]: E0128 14:05:56.852412 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:06:07 crc kubenswrapper[4848]: I0128 14:06:07.850071 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:06:07 crc kubenswrapper[4848]: E0128 14:06:07.851281 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:06:19 crc kubenswrapper[4848]: I0128 14:06:19.850463 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:06:19 crc kubenswrapper[4848]: E0128 14:06:19.851900 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:06:34 crc kubenswrapper[4848]: I0128 14:06:34.857571 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:06:34 crc kubenswrapper[4848]: E0128 14:06:34.858869 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:06:46 crc kubenswrapper[4848]: I0128 14:06:46.850671 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:06:46 crc kubenswrapper[4848]: E0128 14:06:46.851925 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.346715 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jww8r"] Jan 28 14:06:57 crc kubenswrapper[4848]: E0128 14:06:57.352442 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" containerName="keystone-cron" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.352489 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" containerName="keystone-cron" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.352839 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502" containerName="keystone-cron" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.368008 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.373350 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jww8r"] Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.521128 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-catalog-content\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.521418 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-utilities\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.522117 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgg8h\" (UniqueName: \"kubernetes.io/projected/3026307b-47f2-4352-b0fc-2ddf7f5f6437-kube-api-access-xgg8h\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.625293 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-utilities\") pod \"redhat-marketplace-jww8r\" (UID: 
\"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.625860 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgg8h\" (UniqueName: \"kubernetes.io/projected/3026307b-47f2-4352-b0fc-2ddf7f5f6437-kube-api-access-xgg8h\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.625981 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-catalog-content\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.626032 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-utilities\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.626578 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-catalog-content\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.658772 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgg8h\" (UniqueName: \"kubernetes.io/projected/3026307b-47f2-4352-b0fc-2ddf7f5f6437-kube-api-access-xgg8h\") pod \"redhat-marketplace-jww8r\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:57 crc kubenswrapper[4848]: I0128 14:06:57.713294 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:06:58 crc kubenswrapper[4848]: I0128 14:06:58.299505 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jww8r"] Jan 28 14:06:58 crc kubenswrapper[4848]: I0128 14:06:58.748373 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerStarted","Data":"b946b610b341f6376895b470cc7433a55cb62e81506c5683f431122f90d3362b"} Jan 28 14:06:58 crc kubenswrapper[4848]: I0128 14:06:58.850151 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:06:58 crc kubenswrapper[4848]: E0128 14:06:58.850574 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:06:59 crc kubenswrapper[4848]: I0128 14:06:59.764332 4848 generic.go:334] "Generic (PLEG): container finished" podID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerID="e3fece8d37b23c206226bb36670dc9dc8cee6b821e28e31b804e625c1592a6fa" exitCode=0 Jan 28 14:06:59 crc kubenswrapper[4848]: I0128 14:06:59.764601 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerDied","Data":"e3fece8d37b23c206226bb36670dc9dc8cee6b821e28e31b804e625c1592a6fa"} Jan 28 14:06:59 crc kubenswrapper[4848]: I0128 14:06:59.767394 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 14:07:01 crc kubenswrapper[4848]: I0128 14:07:01.789289 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerStarted","Data":"4bd650d2f41540a764e687f341ddf2bec56103955851e25bcb6e06d739c0d507"} Jan 28 14:07:03 crc kubenswrapper[4848]: I0128 14:07:03.815444 4848 generic.go:334] "Generic (PLEG): container finished" podID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerID="4bd650d2f41540a764e687f341ddf2bec56103955851e25bcb6e06d739c0d507" exitCode=0 Jan 28 14:07:03 crc kubenswrapper[4848]: I0128 14:07:03.815520 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerDied","Data":"4bd650d2f41540a764e687f341ddf2bec56103955851e25bcb6e06d739c0d507"} Jan 28 14:07:06 crc kubenswrapper[4848]: I0128 14:07:06.871771 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerStarted","Data":"e8e6d000444ee1ac73b960d042ea8647b1784c590223c26b8df0633583e7ed08"} Jan 28 14:07:06 crc kubenswrapper[4848]: I0128 14:07:06.912503 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jww8r" podStartSLOduration=4.810356934 podStartE2EDuration="9.912477775s" podCreationTimestamp="2026-01-28 14:06:57 +0000 UTC" firstStartedPulling="2026-01-28 
Jan 28 14:07:07 crc kubenswrapper[4848]: I0128 14:07:07.714330 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jww8r"
Jan 28 14:07:07 crc kubenswrapper[4848]: I0128 14:07:07.714852 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jww8r"
Jan 28 14:07:07 crc kubenswrapper[4848]: I0128 14:07:07.765515 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jww8r"
Jan 28 14:07:12 crc kubenswrapper[4848]: I0128 14:07:12.851197 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab"
Jan 28 14:07:12 crc kubenswrapper[4848]: E0128 14:07:12.852288 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:07:17 crc kubenswrapper[4848]: I0128 14:07:17.831188 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jww8r"
Jan 28 14:07:17 crc kubenswrapper[4848]: I0128 14:07:17.896331 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jww8r"]
Jan 28 14:07:17 crc kubenswrapper[4848]: I0128 14:07:17.991296 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jww8r" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="registry-server" containerID="cri-o://e8e6d000444ee1ac73b960d042ea8647b1784c590223c26b8df0633583e7ed08" gracePeriod=2
Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.013271 4848 generic.go:334] "Generic (PLEG): container finished" podID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerID="e8e6d000444ee1ac73b960d042ea8647b1784c590223c26b8df0633583e7ed08" exitCode=0
Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.013511 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerDied","Data":"e8e6d000444ee1ac73b960d042ea8647b1784c590223c26b8df0633583e7ed08"}
Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.190384 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.315732 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-catalog-content\") pod \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.316032 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-utilities\") pod \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.316130 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgg8h\" (UniqueName: \"kubernetes.io/projected/3026307b-47f2-4352-b0fc-2ddf7f5f6437-kube-api-access-xgg8h\") pod \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\" (UID: \"3026307b-47f2-4352-b0fc-2ddf7f5f6437\") " Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.317597 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-utilities" (OuterVolumeSpecName: "utilities") pod "3026307b-47f2-4352-b0fc-2ddf7f5f6437" (UID: "3026307b-47f2-4352-b0fc-2ddf7f5f6437"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.338411 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3026307b-47f2-4352-b0fc-2ddf7f5f6437" (UID: "3026307b-47f2-4352-b0fc-2ddf7f5f6437"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.419143 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.419190 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3026307b-47f2-4352-b0fc-2ddf7f5f6437-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.882943 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3026307b-47f2-4352-b0fc-2ddf7f5f6437-kube-api-access-xgg8h" (OuterVolumeSpecName: "kube-api-access-xgg8h") pod "3026307b-47f2-4352-b0fc-2ddf7f5f6437" (UID: "3026307b-47f2-4352-b0fc-2ddf7f5f6437"). InnerVolumeSpecName "kube-api-access-xgg8h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:07:19 crc kubenswrapper[4848]: I0128 14:07:19.932363 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgg8h\" (UniqueName: \"kubernetes.io/projected/3026307b-47f2-4352-b0fc-2ddf7f5f6437-kube-api-access-xgg8h\") on node \"crc\" DevicePath \"\"" Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.026868 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jww8r" event={"ID":"3026307b-47f2-4352-b0fc-2ddf7f5f6437","Type":"ContainerDied","Data":"b946b610b341f6376895b470cc7433a55cb62e81506c5683f431122f90d3362b"} Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.026946 4848 scope.go:117] "RemoveContainer" containerID="e8e6d000444ee1ac73b960d042ea8647b1784c590223c26b8df0633583e7ed08" Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.026978 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jww8r" Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.051493 4848 scope.go:117] "RemoveContainer" containerID="4bd650d2f41540a764e687f341ddf2bec56103955851e25bcb6e06d739c0d507" Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.073384 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jww8r"] Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.083414 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jww8r"] Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.083681 4848 scope.go:117] "RemoveContainer" containerID="e3fece8d37b23c206226bb36670dc9dc8cee6b821e28e31b804e625c1592a6fa" Jan 28 14:07:20 crc kubenswrapper[4848]: I0128 14:07:20.863563 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" path="/var/lib/kubelet/pods/3026307b-47f2-4352-b0fc-2ddf7f5f6437/volumes" Jan 28 14:07:25 crc kubenswrapper[4848]: I0128 14:07:25.850966 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:07:25 crc kubenswrapper[4848]: E0128 14:07:25.852128 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.285992 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4x4t9"] Jan 28 14:07:26 crc kubenswrapper[4848]: E0128 14:07:26.286821 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="registry-server" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.286839 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="registry-server" Jan 28 14:07:26 crc kubenswrapper[4848]: E0128 14:07:26.286869 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="extract-content" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.286876 4848 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="extract-content" Jan 28 14:07:26 crc kubenswrapper[4848]: E0128 14:07:26.286904 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="extract-utilities" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.286911 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="extract-utilities" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.287141 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="3026307b-47f2-4352-b0fc-2ddf7f5f6437" containerName="registry-server" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.288937 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.308059 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4x4t9"] Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.390386 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s696t\" (UniqueName: \"kubernetes.io/projected/8bc4aa92-f879-481d-860b-ee7d90e670b0-kube-api-access-s696t\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.390621 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-utilities\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.390667 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-catalog-content\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.492834 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s696t\" (UniqueName: \"kubernetes.io/projected/8bc4aa92-f879-481d-860b-ee7d90e670b0-kube-api-access-s696t\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.493036 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-utilities\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.493084 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-catalog-content\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.493779 4848 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-catalog-content\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.493856 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-utilities\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.529195 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s696t\" (UniqueName: \"kubernetes.io/projected/8bc4aa92-f879-481d-860b-ee7d90e670b0-kube-api-access-s696t\") pod \"certified-operators-4x4t9\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:26 crc kubenswrapper[4848]: I0128 14:07:26.612364 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:27 crc kubenswrapper[4848]: I0128 14:07:27.226107 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4x4t9"] Jan 28 14:07:28 crc kubenswrapper[4848]: I0128 14:07:28.132341 4848 generic.go:334] "Generic (PLEG): container finished" podID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerID="ce8b12f5683e0dd050903b0b43888c93a01b138a10219019b4f3f07767ca02fe" exitCode=0 Jan 28 14:07:28 crc kubenswrapper[4848]: I0128 14:07:28.132433 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4x4t9" event={"ID":"8bc4aa92-f879-481d-860b-ee7d90e670b0","Type":"ContainerDied","Data":"ce8b12f5683e0dd050903b0b43888c93a01b138a10219019b4f3f07767ca02fe"} Jan 28 14:07:28 crc kubenswrapper[4848]: I0128 14:07:28.132956 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4x4t9" event={"ID":"8bc4aa92-f879-481d-860b-ee7d90e670b0","Type":"ContainerStarted","Data":"d191d4f8468313d3189cf59d4c8b16b939d03ccb02d3f6d00329b34fb5561a20"} Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.156858 4848 generic.go:334] "Generic (PLEG): container finished" podID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerID="467fb17bd44f7a495f0548f371043e5f731d5788f9612772da758207536c9750" exitCode=0 Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.156937 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4x4t9" event={"ID":"8bc4aa92-f879-481d-860b-ee7d90e670b0","Type":"ContainerDied","Data":"467fb17bd44f7a495f0548f371043e5f731d5788f9612772da758207536c9750"} Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.293572 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5chfw"] Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.296522 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.306420 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5chfw"] Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.397803 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqtx2\" (UniqueName: \"kubernetes.io/projected/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-kube-api-access-cqtx2\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.398215 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-utilities\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.398363 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-catalog-content\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.500743 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-catalog-content\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.501113 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqtx2\" (UniqueName: \"kubernetes.io/projected/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-kube-api-access-cqtx2\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.501170 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-utilities\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.501968 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-utilities\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.502328 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-catalog-content\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.530169 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-cqtx2\" (UniqueName: \"kubernetes.io/projected/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-kube-api-access-cqtx2\") pod \"redhat-operators-5chfw\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:30 crc kubenswrapper[4848]: I0128 14:07:30.619265 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:31 crc kubenswrapper[4848]: I0128 14:07:31.188430 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4x4t9" event={"ID":"8bc4aa92-f879-481d-860b-ee7d90e670b0","Type":"ContainerStarted","Data":"e1bd02cca5f77739cdd495100898b104143c16df3f65e598682ce1057b910e59"} Jan 28 14:07:31 crc kubenswrapper[4848]: I0128 14:07:31.285165 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4x4t9" podStartSLOduration=2.8011097879999998 podStartE2EDuration="5.2851309s" podCreationTimestamp="2026-01-28 14:07:26 +0000 UTC" firstStartedPulling="2026-01-28 14:07:28.135655301 +0000 UTC m=+4875.047872339" lastFinishedPulling="2026-01-28 14:07:30.619676413 +0000 UTC m=+4877.531893451" observedRunningTime="2026-01-28 14:07:31.276411857 +0000 UTC m=+4878.188628895" watchObservedRunningTime="2026-01-28 14:07:31.2851309 +0000 UTC m=+4878.197347938" Jan 28 14:07:31 crc kubenswrapper[4848]: I0128 14:07:31.303750 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5chfw"] Jan 28 14:07:32 crc kubenswrapper[4848]: I0128 14:07:32.200866 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerStarted","Data":"74980cbb76aa9e37f14d139e18d5516ac3f05ebd00b9e834e40b9db7fdd4a210"} Jan 28 14:07:32 crc kubenswrapper[4848]: I0128 14:07:32.201334 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerStarted","Data":"7655e0120faf5236df195ad060a476be6d828bb1b2d8958df762e30b6823ebe1"} Jan 28 14:07:33 crc kubenswrapper[4848]: I0128 14:07:33.218464 4848 generic.go:334] "Generic (PLEG): container finished" podID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerID="74980cbb76aa9e37f14d139e18d5516ac3f05ebd00b9e834e40b9db7fdd4a210" exitCode=0 Jan 28 14:07:33 crc kubenswrapper[4848]: I0128 14:07:33.218549 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerDied","Data":"74980cbb76aa9e37f14d139e18d5516ac3f05ebd00b9e834e40b9db7fdd4a210"} Jan 28 14:07:36 crc kubenswrapper[4848]: I0128 14:07:36.261028 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerStarted","Data":"e7f8c3c4f703a465e79307337fbb213d3f234f7abdaf3de3e7c7b7a6dbfa49e5"} Jan 28 14:07:36 crc kubenswrapper[4848]: I0128 14:07:36.612998 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:36 crc kubenswrapper[4848]: I0128 14:07:36.613097 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:36 crc kubenswrapper[4848]: I0128 
14:07:36.669336 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:37 crc kubenswrapper[4848]: I0128 14:07:37.328649 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:38 crc kubenswrapper[4848]: I0128 14:07:38.884522 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4x4t9"] Jan 28 14:07:39 crc kubenswrapper[4848]: I0128 14:07:39.301472 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4x4t9" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="registry-server" containerID="cri-o://e1bd02cca5f77739cdd495100898b104143c16df3f65e598682ce1057b910e59" gracePeriod=2 Jan 28 14:07:39 crc kubenswrapper[4848]: I0128 14:07:39.850987 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:07:41 crc kubenswrapper[4848]: I0128 14:07:41.361791 4848 generic.go:334] "Generic (PLEG): container finished" podID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerID="e1bd02cca5f77739cdd495100898b104143c16df3f65e598682ce1057b910e59" exitCode=0 Jan 28 14:07:41 crc kubenswrapper[4848]: I0128 14:07:41.361927 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4x4t9" event={"ID":"8bc4aa92-f879-481d-860b-ee7d90e670b0","Type":"ContainerDied","Data":"e1bd02cca5f77739cdd495100898b104143c16df3f65e598682ce1057b910e59"} Jan 28 14:07:41 crc kubenswrapper[4848]: I0128 14:07:41.371893 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"3a69914290f777fdb5fa47bf15430ade2719379475b8cba336f7cb5682ea5def"} Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.583299 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.605875 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-utilities\") pod \"8bc4aa92-f879-481d-860b-ee7d90e670b0\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.605963 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-catalog-content\") pod \"8bc4aa92-f879-481d-860b-ee7d90e670b0\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.606006 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s696t\" (UniqueName: \"kubernetes.io/projected/8bc4aa92-f879-481d-860b-ee7d90e670b0-kube-api-access-s696t\") pod \"8bc4aa92-f879-481d-860b-ee7d90e670b0\" (UID: \"8bc4aa92-f879-481d-860b-ee7d90e670b0\") " Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.607199 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-utilities" (OuterVolumeSpecName: "utilities") pod "8bc4aa92-f879-481d-860b-ee7d90e670b0" (UID: "8bc4aa92-f879-481d-860b-ee7d90e670b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.614584 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc4aa92-f879-481d-860b-ee7d90e670b0-kube-api-access-s696t" (OuterVolumeSpecName: "kube-api-access-s696t") pod "8bc4aa92-f879-481d-860b-ee7d90e670b0" (UID: "8bc4aa92-f879-481d-860b-ee7d90e670b0"). InnerVolumeSpecName "kube-api-access-s696t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.652613 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8bc4aa92-f879-481d-860b-ee7d90e670b0" (UID: "8bc4aa92-f879-481d-860b-ee7d90e670b0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.708832 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.709136 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc4aa92-f879-481d-860b-ee7d90e670b0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:07:42 crc kubenswrapper[4848]: I0128 14:07:42.709195 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s696t\" (UniqueName: \"kubernetes.io/projected/8bc4aa92-f879-481d-860b-ee7d90e670b0-kube-api-access-s696t\") on node \"crc\" DevicePath \"\"" Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.420407 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4x4t9" event={"ID":"8bc4aa92-f879-481d-860b-ee7d90e670b0","Type":"ContainerDied","Data":"d191d4f8468313d3189cf59d4c8b16b939d03ccb02d3f6d00329b34fb5561a20"} Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.420963 4848 scope.go:117] "RemoveContainer" containerID="e1bd02cca5f77739cdd495100898b104143c16df3f65e598682ce1057b910e59" Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.420541 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4x4t9" Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.458503 4848 scope.go:117] "RemoveContainer" containerID="467fb17bd44f7a495f0548f371043e5f731d5788f9612772da758207536c9750" Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.462508 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4x4t9"] Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.475753 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4x4t9"] Jan 28 14:07:43 crc kubenswrapper[4848]: I0128 14:07:43.510513 4848 scope.go:117] "RemoveContainer" containerID="ce8b12f5683e0dd050903b0b43888c93a01b138a10219019b4f3f07767ca02fe" Jan 28 14:07:44 crc kubenswrapper[4848]: I0128 14:07:44.864887 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" path="/var/lib/kubelet/pods/8bc4aa92-f879-481d-860b-ee7d90e670b0/volumes" Jan 28 14:07:45 crc kubenswrapper[4848]: I0128 14:07:45.453283 4848 generic.go:334] "Generic (PLEG): container finished" podID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerID="e7f8c3c4f703a465e79307337fbb213d3f234f7abdaf3de3e7c7b7a6dbfa49e5" exitCode=0 Jan 28 14:07:45 crc kubenswrapper[4848]: I0128 14:07:45.453379 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerDied","Data":"e7f8c3c4f703a465e79307337fbb213d3f234f7abdaf3de3e7c7b7a6dbfa49e5"} Jan 28 14:07:48 crc kubenswrapper[4848]: I0128 14:07:48.500573 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerStarted","Data":"4985469d331c1faa9540a0e9fb301f8169bc3c4a57d5593717ac51a71256fe15"} Jan 28 14:07:48 crc kubenswrapper[4848]: I0128 14:07:48.531408 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-5chfw" podStartSLOduration=4.479828554 podStartE2EDuration="18.531382821s" podCreationTimestamp="2026-01-28 14:07:30 +0000 UTC" firstStartedPulling="2026-01-28 14:07:33.221843415 +0000 UTC m=+4880.134060453" lastFinishedPulling="2026-01-28 14:07:47.273397682 +0000 UTC m=+4894.185614720" observedRunningTime="2026-01-28 14:07:48.526796458 +0000 UTC m=+4895.439013496" watchObservedRunningTime="2026-01-28 14:07:48.531382821 +0000 UTC m=+4895.443599859" Jan 28 14:07:50 crc kubenswrapper[4848]: I0128 14:07:50.620594 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:50 crc kubenswrapper[4848]: I0128 14:07:50.620984 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:07:51 crc kubenswrapper[4848]: I0128 14:07:51.698374 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5chfw" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="registry-server" probeResult="failure" output=< Jan 28 14:07:51 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 14:07:51 crc kubenswrapper[4848]: > Jan 28 14:08:01 crc kubenswrapper[4848]: I0128 14:08:01.229279 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:08:01 crc kubenswrapper[4848]: I0128 14:08:01.282581 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:08:01 crc kubenswrapper[4848]: I0128 14:08:01.490593 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5chfw"] Jan 28 14:08:02 crc kubenswrapper[4848]: I0128 14:08:02.706686 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5chfw" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="registry-server" containerID="cri-o://4985469d331c1faa9540a0e9fb301f8169bc3c4a57d5593717ac51a71256fe15" gracePeriod=2 Jan 28 14:08:03 crc kubenswrapper[4848]: I0128 14:08:03.726220 4848 generic.go:334] "Generic (PLEG): container finished" podID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerID="4985469d331c1faa9540a0e9fb301f8169bc3c4a57d5593717ac51a71256fe15" exitCode=0 Jan 28 14:08:03 crc kubenswrapper[4848]: I0128 14:08:03.726317 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerDied","Data":"4985469d331c1faa9540a0e9fb301f8169bc3c4a57d5593717ac51a71256fe15"} Jan 28 14:08:03 crc kubenswrapper[4848]: I0128 14:08:03.726817 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5chfw" event={"ID":"9a2dcc96-0a62-46fd-9aea-c5583f0d0829","Type":"ContainerDied","Data":"7655e0120faf5236df195ad060a476be6d828bb1b2d8958df762e30b6823ebe1"} Jan 28 14:08:03 crc kubenswrapper[4848]: I0128 14:08:03.726844 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7655e0120faf5236df195ad060a476be6d828bb1b2d8958df762e30b6823ebe1" Jan 28 14:08:03 crc kubenswrapper[4848]: I0128 14:08:03.901282 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.091837 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqtx2\" (UniqueName: \"kubernetes.io/projected/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-kube-api-access-cqtx2\") pod \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.091988 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-utilities\") pod \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.092156 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-catalog-content\") pod \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\" (UID: \"9a2dcc96-0a62-46fd-9aea-c5583f0d0829\") " Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.093580 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-utilities" (OuterVolumeSpecName: "utilities") pod "9a2dcc96-0a62-46fd-9aea-c5583f0d0829" (UID: "9a2dcc96-0a62-46fd-9aea-c5583f0d0829"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.101451 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-kube-api-access-cqtx2" (OuterVolumeSpecName: "kube-api-access-cqtx2") pod "9a2dcc96-0a62-46fd-9aea-c5583f0d0829" (UID: "9a2dcc96-0a62-46fd-9aea-c5583f0d0829"). InnerVolumeSpecName "kube-api-access-cqtx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.197182 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqtx2\" (UniqueName: \"kubernetes.io/projected/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-kube-api-access-cqtx2\") on node \"crc\" DevicePath \"\"" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.197446 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.268204 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a2dcc96-0a62-46fd-9aea-c5583f0d0829" (UID: "9a2dcc96-0a62-46fd-9aea-c5583f0d0829"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.299054 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a2dcc96-0a62-46fd-9aea-c5583f0d0829-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.739575 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5chfw" Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.783968 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5chfw"] Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.796202 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5chfw"] Jan 28 14:08:04 crc kubenswrapper[4848]: I0128 14:08:04.865224 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" path="/var/lib/kubelet/pods/9a2dcc96-0a62-46fd-9aea-c5583f0d0829/volumes" Jan 28 14:10:07 crc kubenswrapper[4848]: I0128 14:10:07.924349 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:10:07 crc kubenswrapper[4848]: I0128 14:10:07.924860 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.761697 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k8dv8"] Jan 28 14:10:32 crc kubenswrapper[4848]: E0128 14:10:32.762792 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="extract-content" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.762812 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="extract-content" Jan 28 14:10:32 crc kubenswrapper[4848]: E0128 14:10:32.762850 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="registry-server" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.762856 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="registry-server" Jan 28 14:10:32 crc kubenswrapper[4848]: E0128 14:10:32.762874 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="extract-utilities" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.762880 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="extract-utilities" Jan 28 14:10:32 crc kubenswrapper[4848]: E0128 14:10:32.762890 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="extract-utilities" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.762896 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="extract-utilities" Jan 28 14:10:32 crc kubenswrapper[4848]: E0128 14:10:32.762905 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="extract-content" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.762910 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" 
containerName="extract-content" Jan 28 14:10:32 crc kubenswrapper[4848]: E0128 14:10:32.762926 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="registry-server" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.762931 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="registry-server" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.763119 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a2dcc96-0a62-46fd-9aea-c5583f0d0829" containerName="registry-server" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.763134 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc4aa92-f879-481d-860b-ee7d90e670b0" containerName="registry-server" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.765204 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.831784 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k8dv8"] Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.915764 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-utilities\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.915858 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfndg\" (UniqueName: \"kubernetes.io/projected/e5fa268e-f10e-443a-b542-cf4e91e67619-kube-api-access-jfndg\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:32 crc kubenswrapper[4848]: I0128 14:10:32.915906 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-catalog-content\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.018974 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-utilities\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.019103 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfndg\" (UniqueName: \"kubernetes.io/projected/e5fa268e-f10e-443a-b542-cf4e91e67619-kube-api-access-jfndg\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.019161 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-catalog-content\") pod \"community-operators-k8dv8\" (UID: 
\"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.019905 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-catalog-content\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.021687 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-utilities\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.062080 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfndg\" (UniqueName: \"kubernetes.io/projected/e5fa268e-f10e-443a-b542-cf4e91e67619-kube-api-access-jfndg\") pod \"community-operators-k8dv8\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.122981 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:33 crc kubenswrapper[4848]: I0128 14:10:33.745938 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k8dv8"] Jan 28 14:10:33 crc kubenswrapper[4848]: W0128 14:10:33.752940 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5fa268e_f10e_443a_b542_cf4e91e67619.slice/crio-a4eb60684fa77f8b00c56b3a536464fdceab12067c6ab99a4eb998c6c8b4b920 WatchSource:0}: Error finding container a4eb60684fa77f8b00c56b3a536464fdceab12067c6ab99a4eb998c6c8b4b920: Status 404 returned error can't find the container with id a4eb60684fa77f8b00c56b3a536464fdceab12067c6ab99a4eb998c6c8b4b920 Jan 28 14:10:34 crc kubenswrapper[4848]: I0128 14:10:34.488850 4848 generic.go:334] "Generic (PLEG): container finished" podID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerID="35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d" exitCode=0 Jan 28 14:10:34 crc kubenswrapper[4848]: I0128 14:10:34.489143 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerDied","Data":"35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d"} Jan 28 14:10:34 crc kubenswrapper[4848]: I0128 14:10:34.489364 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerStarted","Data":"a4eb60684fa77f8b00c56b3a536464fdceab12067c6ab99a4eb998c6c8b4b920"} Jan 28 14:10:35 crc kubenswrapper[4848]: I0128 14:10:35.504582 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerStarted","Data":"10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0"} Jan 28 14:10:37 crc kubenswrapper[4848]: I0128 14:10:37.529519 4848 generic.go:334] "Generic (PLEG): container finished" 
podID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerID="10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0" exitCode=0 Jan 28 14:10:37 crc kubenswrapper[4848]: I0128 14:10:37.529614 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerDied","Data":"10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0"} Jan 28 14:10:37 crc kubenswrapper[4848]: I0128 14:10:37.925104 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:10:37 crc kubenswrapper[4848]: I0128 14:10:37.925172 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:10:38 crc kubenswrapper[4848]: I0128 14:10:38.547130 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerStarted","Data":"f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4"} Jan 28 14:10:38 crc kubenswrapper[4848]: I0128 14:10:38.581469 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k8dv8" podStartSLOduration=3.056203531 podStartE2EDuration="6.581438553s" podCreationTimestamp="2026-01-28 14:10:32 +0000 UTC" firstStartedPulling="2026-01-28 14:10:34.494312411 +0000 UTC m=+5061.406529449" lastFinishedPulling="2026-01-28 14:10:38.019547433 +0000 UTC m=+5064.931764471" observedRunningTime="2026-01-28 14:10:38.572654847 +0000 UTC m=+5065.484871905" watchObservedRunningTime="2026-01-28 14:10:38.581438553 +0000 UTC m=+5065.493655591" Jan 28 14:10:43 crc kubenswrapper[4848]: I0128 14:10:43.123649 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:43 crc kubenswrapper[4848]: I0128 14:10:43.125977 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:43 crc kubenswrapper[4848]: I0128 14:10:43.187667 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:43 crc kubenswrapper[4848]: I0128 14:10:43.660000 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:43 crc kubenswrapper[4848]: I0128 14:10:43.728350 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k8dv8"] Jan 28 14:10:45 crc kubenswrapper[4848]: I0128 14:10:45.621518 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k8dv8" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="registry-server" containerID="cri-o://f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4" gracePeriod=2 Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.561053 4848 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.623862 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfndg\" (UniqueName: \"kubernetes.io/projected/e5fa268e-f10e-443a-b542-cf4e91e67619-kube-api-access-jfndg\") pod \"e5fa268e-f10e-443a-b542-cf4e91e67619\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.623943 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-utilities\") pod \"e5fa268e-f10e-443a-b542-cf4e91e67619\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.624063 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-catalog-content\") pod \"e5fa268e-f10e-443a-b542-cf4e91e67619\" (UID: \"e5fa268e-f10e-443a-b542-cf4e91e67619\") " Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.627087 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-utilities" (OuterVolumeSpecName: "utilities") pod "e5fa268e-f10e-443a-b542-cf4e91e67619" (UID: "e5fa268e-f10e-443a-b542-cf4e91e67619"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.646073 4848 generic.go:334] "Generic (PLEG): container finished" podID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerID="f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4" exitCode=0 Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.646146 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerDied","Data":"f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4"} Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.646235 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k8dv8" event={"ID":"e5fa268e-f10e-443a-b542-cf4e91e67619","Type":"ContainerDied","Data":"a4eb60684fa77f8b00c56b3a536464fdceab12067c6ab99a4eb998c6c8b4b920"} Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.646296 4848 scope.go:117] "RemoveContainer" containerID="f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.646737 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k8dv8" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.648737 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5fa268e-f10e-443a-b542-cf4e91e67619-kube-api-access-jfndg" (OuterVolumeSpecName: "kube-api-access-jfndg") pod "e5fa268e-f10e-443a-b542-cf4e91e67619" (UID: "e5fa268e-f10e-443a-b542-cf4e91e67619"). InnerVolumeSpecName "kube-api-access-jfndg". 
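The pod_startup_latency_tracker entry above encodes a simple relation: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull interval (lastFinishedPulling minus firstStartedPulling) from that. The arithmetic checks out against the logged values; a short verification using only timestamps copied from the entry:

```go
// Verifies the durations reported for community-operators-k8dv8:
// 6.581438553s end-to-end, 3.056203531s with the image pull excluded.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-28 14:10:32 +0000 UTC")
	firstPull := parse("2026-01-28 14:10:34.494312411 +0000 UTC")
	lastPull := parse("2026-01-28 14:10:38.019547433 +0000 UTC")
	observed := parse("2026-01-28 14:10:38.581438553 +0000 UTC")

	e2e := observed.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println(e2e) // 6.581438553s, matching podStartE2EDuration
	fmt.Println(slo) // 3.056203531s, matching podStartSLOduration
}
```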
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.692114 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5fa268e-f10e-443a-b542-cf4e91e67619" (UID: "e5fa268e-f10e-443a-b542-cf4e91e67619"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.718882 4848 scope.go:117] "RemoveContainer" containerID="10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.727568 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfndg\" (UniqueName: \"kubernetes.io/projected/e5fa268e-f10e-443a-b542-cf4e91e67619-kube-api-access-jfndg\") on node \"crc\" DevicePath \"\"" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.727593 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.727604 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fa268e-f10e-443a-b542-cf4e91e67619-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.752237 4848 scope.go:117] "RemoveContainer" containerID="35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.791352 4848 scope.go:117] "RemoveContainer" containerID="f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4" Jan 28 14:10:47 crc kubenswrapper[4848]: E0128 14:10:47.794636 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4\": container with ID starting with f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4 not found: ID does not exist" containerID="f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.794702 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4"} err="failed to get container status \"f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4\": rpc error: code = NotFound desc = could not find container \"f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4\": container with ID starting with f2b4e0bebfaf9e5b0b16c35b61547ff3418ff186dce3864df43a2936d043dea4 not found: ID does not exist" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.794750 4848 scope.go:117] "RemoveContainer" containerID="10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0" Jan 28 14:10:47 crc kubenswrapper[4848]: E0128 14:10:47.795400 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0\": container with ID starting with 10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0 not found: ID does not exist" containerID="10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0" Jan 
28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.795437 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0"} err="failed to get container status \"10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0\": rpc error: code = NotFound desc = could not find container \"10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0\": container with ID starting with 10d4eb5a87b1cf559908fc68797f1540dd2f9f089a9af74776660628457bcdc0 not found: ID does not exist" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.795461 4848 scope.go:117] "RemoveContainer" containerID="35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d" Jan 28 14:10:47 crc kubenswrapper[4848]: E0128 14:10:47.795849 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d\": container with ID starting with 35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d not found: ID does not exist" containerID="35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.795889 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d"} err="failed to get container status \"35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d\": rpc error: code = NotFound desc = could not find container \"35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d\": container with ID starting with 35b5b0c08fe044cca423535a8af1cfb433bf92c42f00d3b64e35a42d2904050d not found: ID does not exist" Jan 28 14:10:47 crc kubenswrapper[4848]: I0128 14:10:47.990535 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k8dv8"] Jan 28 14:10:48 crc kubenswrapper[4848]: I0128 14:10:48.002285 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k8dv8"] Jan 28 14:10:48 crc kubenswrapper[4848]: I0128 14:10:48.873979 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" path="/var/lib/kubelet/pods/e5fa268e-f10e-443a-b542-cf4e91e67619/volumes" Jan 28 14:11:07 crc kubenswrapper[4848]: I0128 14:11:07.924568 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:11:07 crc kubenswrapper[4848]: I0128 14:11:07.925108 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:11:07 crc kubenswrapper[4848]: I0128 14:11:07.925167 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:11:07 crc kubenswrapper[4848]: I0128 14:11:07.926271 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
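The "ContainerStatus from runtime service failed ... NotFound" errors above are the benign tail of cleanup: the kubelet re-queries status for container IDs it has just removed, CRI-O answers NotFound, and deletion is treated as already done. A sketch of that idempotent-delete pattern under stated assumptions: the cri type and removeContainer helper are invented for illustration, and only the google.golang.org/grpc status/codes calls are real API (the module must be fetched to build this):

```go
// Treat NotFound from the runtime as "already deleted" rather than a failure,
// as the log's DeleteContainer/ContainerStatus exchange suggests.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type cri struct{}

func (cri) ContainerStatus(id string) error {
	// Stand-in for the runtime call that produced the NotFound lines above.
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func removeContainer(rt cri, id string) error {
	if err := rt.ContainerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // deletion is idempotent: a missing container is success
		}
		return fmt.Errorf("get container status: %w", err)
	}
	// ...actual removal would go here...
	return nil
}

func main() {
	if err := removeContainer(cri{}, "f2b4e0bebfaf"); err == nil {
		fmt.Println("NotFound treated as already deleted")
	}
}
```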
containerStatusID={"Type":"cri-o","ID":"3a69914290f777fdb5fa47bf15430ade2719379475b8cba336f7cb5682ea5def"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:11:07 crc kubenswrapper[4848]: I0128 14:11:07.926347 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://3a69914290f777fdb5fa47bf15430ade2719379475b8cba336f7cb5682ea5def" gracePeriod=600 Jan 28 14:11:08 crc kubenswrapper[4848]: I0128 14:11:08.894768 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="3a69914290f777fdb5fa47bf15430ade2719379475b8cba336f7cb5682ea5def" exitCode=0 Jan 28 14:11:08 crc kubenswrapper[4848]: I0128 14:11:08.894868 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"3a69914290f777fdb5fa47bf15430ade2719379475b8cba336f7cb5682ea5def"} Jan 28 14:11:08 crc kubenswrapper[4848]: I0128 14:11:08.895822 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"} Jan 28 14:11:08 crc kubenswrapper[4848]: I0128 14:11:08.895868 4848 scope.go:117] "RemoveContainer" containerID="374afcb9443850e267dd4f3493aa36e3c5dd2fee8d942ac7f5a00d018406bcab" Jan 28 14:13:37 crc kubenswrapper[4848]: I0128 14:13:37.924511 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:13:37 crc kubenswrapper[4848]: I0128 14:13:37.925141 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:14:07 crc kubenswrapper[4848]: I0128 14:14:07.422130 4848 scope.go:117] "RemoveContainer" containerID="4985469d331c1faa9540a0e9fb301f8169bc3c4a57d5593717ac51a71256fe15" Jan 28 14:14:07 crc kubenswrapper[4848]: I0128 14:14:07.454485 4848 scope.go:117] "RemoveContainer" containerID="74980cbb76aa9e37f14d139e18d5516ac3f05ebd00b9e834e40b9db7fdd4a210" Jan 28 14:14:07 crc kubenswrapper[4848]: I0128 14:14:07.479373 4848 scope.go:117] "RemoveContainer" containerID="e7f8c3c4f703a465e79307337fbb213d3f234f7abdaf3de3e7c7b7a6dbfa49e5" Jan 28 14:14:07 crc kubenswrapper[4848]: I0128 14:14:07.924457 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:14:07 crc kubenswrapper[4848]: I0128 14:14:07.924555 4848 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:14:37 crc kubenswrapper[4848]: I0128 14:14:37.925085 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:14:37 crc kubenswrapper[4848]: I0128 14:14:37.925658 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:14:37 crc kubenswrapper[4848]: I0128 14:14:37.925726 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:14:37 crc kubenswrapper[4848]: I0128 14:14:37.926878 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:14:37 crc kubenswrapper[4848]: I0128 14:14:37.926963 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" gracePeriod=600 Jan 28 14:14:38 crc kubenswrapper[4848]: E0128 14:14:38.628538 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:14:39 crc kubenswrapper[4848]: I0128 14:14:39.135325 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" exitCode=0 Jan 28 14:14:39 crc kubenswrapper[4848]: I0128 14:14:39.135452 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"} Jan 28 14:14:39 crc kubenswrapper[4848]: I0128 14:14:39.135769 4848 scope.go:117] "RemoveContainer" containerID="3a69914290f777fdb5fa47bf15430ade2719379475b8cba336f7cb5682ea5def" Jan 28 14:14:39 crc kubenswrapper[4848]: I0128 14:14:39.136997 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:14:39 crc kubenswrapper[4848]: E0128 
14:14:39.137507 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:14:53 crc kubenswrapper[4848]: I0128 14:14:53.850509 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:14:53 crc kubenswrapper[4848]: E0128 14:14:53.851630 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.174453 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2"] Jan 28 14:15:00 crc kubenswrapper[4848]: E0128 14:15:00.176076 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="registry-server" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.176099 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="registry-server" Jan 28 14:15:00 crc kubenswrapper[4848]: E0128 14:15:00.176152 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="extract-utilities" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.176162 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="extract-utilities" Jan 28 14:15:00 crc kubenswrapper[4848]: E0128 14:15:00.176185 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="extract-content" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.176195 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="extract-content" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.176492 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5fa268e-f10e-443a-b542-cf4e91e67619" containerName="registry-server" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.177642 4848 util.go:30] "No sandbox for pod can be found. 
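The machine-config-daemon sequence above is a liveness-driven restart loop: the HTTP probe on 127.0.0.1:8798/health is refused, the container is killed with the pod's 600s grace, and restarts are then gated by the container back-off quoted in the "back-off 5m0s" errors. By kubelet default that back-off doubles from 10s up to a 5m cap; the repeated "Error syncing pod, skipping" lines every 12-15s are the sync loop re-checking while the window is still open. A self-contained sketch of that schedule, assuming those default parameters:

```go
// Restart back-off schedule assuming the kubelet defaults: 10s initial
// delay, doubling per restart, capped at 5m ("back-off 5m0s" in the log).
package main

import (
	"fmt"
	"time"
)

func backoffDelays(initial, max time.Duration, restarts int) []time.Duration {
	var out []time.Duration
	d := initial
	for i := 0; i < restarts; i++ {
		out = append(out, d)
		if d *= 2; d > max {
			d = max
		}
	}
	return out
}

func main() {
	// [10s 20s 40s 1m20s 2m40s 5m0s 5m0s 5m0s]
	fmt.Println(backoffDelays(10*time.Second, 5*time.Minute, 8))
}
```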
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.180539 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.182100 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.210010 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2"] Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.249965 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7sc8\" (UniqueName: \"kubernetes.io/projected/540ea0a6-9ffb-41db-9092-03b3e7edd6be-kube-api-access-q7sc8\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.250020 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/540ea0a6-9ffb-41db-9092-03b3e7edd6be-config-volume\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.250442 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/540ea0a6-9ffb-41db-9092-03b3e7edd6be-secret-volume\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.353570 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/540ea0a6-9ffb-41db-9092-03b3e7edd6be-secret-volume\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.353781 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7sc8\" (UniqueName: \"kubernetes.io/projected/540ea0a6-9ffb-41db-9092-03b3e7edd6be-kube-api-access-q7sc8\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.353815 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/540ea0a6-9ffb-41db-9092-03b3e7edd6be-config-volume\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.355539 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/540ea0a6-9ffb-41db-9092-03b3e7edd6be-config-volume\") pod 
\"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.381457 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/540ea0a6-9ffb-41db-9092-03b3e7edd6be-secret-volume\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.395306 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7sc8\" (UniqueName: \"kubernetes.io/projected/540ea0a6-9ffb-41db-9092-03b3e7edd6be-kube-api-access-q7sc8\") pod \"collect-profiles-29493495-85ng2\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:00 crc kubenswrapper[4848]: I0128 14:15:00.510029 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:01 crc kubenswrapper[4848]: I0128 14:15:01.038518 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2"] Jan 28 14:15:01 crc kubenswrapper[4848]: I0128 14:15:01.401301 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" event={"ID":"540ea0a6-9ffb-41db-9092-03b3e7edd6be","Type":"ContainerStarted","Data":"ba0531f608da8cc4a6de0515ba2997b9470bd2c9bc651e3b50ac01057c6ace33"} Jan 28 14:15:01 crc kubenswrapper[4848]: I0128 14:15:01.401363 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" event={"ID":"540ea0a6-9ffb-41db-9092-03b3e7edd6be","Type":"ContainerStarted","Data":"b31687be45706ddec72a61abdd0c756355a7f65264a3c33e342e2e6bbdbde87c"} Jan 28 14:15:01 crc kubenswrapper[4848]: I0128 14:15:01.432125 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" podStartSLOduration=1.432100245 podStartE2EDuration="1.432100245s" podCreationTimestamp="2026-01-28 14:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 14:15:01.430657015 +0000 UTC m=+5328.342874063" watchObservedRunningTime="2026-01-28 14:15:01.432100245 +0000 UTC m=+5328.344317283" Jan 28 14:15:02 crc kubenswrapper[4848]: I0128 14:15:02.414728 4848 generic.go:334] "Generic (PLEG): container finished" podID="540ea0a6-9ffb-41db-9092-03b3e7edd6be" containerID="ba0531f608da8cc4a6de0515ba2997b9470bd2c9bc651e3b50ac01057c6ace33" exitCode=0 Jan 28 14:15:02 crc kubenswrapper[4848]: I0128 14:15:02.414806 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" event={"ID":"540ea0a6-9ffb-41db-9092-03b3e7edd6be","Type":"ContainerDied","Data":"ba0531f608da8cc4a6de0515ba2997b9470bd2c9bc651e3b50ac01057c6ace33"} Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.865619 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.967694 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7sc8\" (UniqueName: \"kubernetes.io/projected/540ea0a6-9ffb-41db-9092-03b3e7edd6be-kube-api-access-q7sc8\") pod \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.967955 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/540ea0a6-9ffb-41db-9092-03b3e7edd6be-secret-volume\") pod \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.968036 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/540ea0a6-9ffb-41db-9092-03b3e7edd6be-config-volume\") pod \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\" (UID: \"540ea0a6-9ffb-41db-9092-03b3e7edd6be\") " Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.969964 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/540ea0a6-9ffb-41db-9092-03b3e7edd6be-config-volume" (OuterVolumeSpecName: "config-volume") pod "540ea0a6-9ffb-41db-9092-03b3e7edd6be" (UID: "540ea0a6-9ffb-41db-9092-03b3e7edd6be"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.977376 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/540ea0a6-9ffb-41db-9092-03b3e7edd6be-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "540ea0a6-9ffb-41db-9092-03b3e7edd6be" (UID: "540ea0a6-9ffb-41db-9092-03b3e7edd6be"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:15:03 crc kubenswrapper[4848]: I0128 14:15:03.977496 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/540ea0a6-9ffb-41db-9092-03b3e7edd6be-kube-api-access-q7sc8" (OuterVolumeSpecName: "kube-api-access-q7sc8") pod "540ea0a6-9ffb-41db-9092-03b3e7edd6be" (UID: "540ea0a6-9ffb-41db-9092-03b3e7edd6be"). InnerVolumeSpecName "kube-api-access-q7sc8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.070940 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/540ea0a6-9ffb-41db-9092-03b3e7edd6be-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.070991 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/540ea0a6-9ffb-41db-9092-03b3e7edd6be-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.071016 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7sc8\" (UniqueName: \"kubernetes.io/projected/540ea0a6-9ffb-41db-9092-03b3e7edd6be-kube-api-access-q7sc8\") on node \"crc\" DevicePath \"\"" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.440269 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" event={"ID":"540ea0a6-9ffb-41db-9092-03b3e7edd6be","Type":"ContainerDied","Data":"b31687be45706ddec72a61abdd0c756355a7f65264a3c33e342e2e6bbdbde87c"} Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.440326 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b31687be45706ddec72a61abdd0c756355a7f65264a3c33e342e2e6bbdbde87c" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.440340 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493495-85ng2" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.519923 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz"] Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.529795 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493450-ktmsz"] Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.863917 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:15:04 crc kubenswrapper[4848]: E0128 14:15:04.864813 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:15:04 crc kubenswrapper[4848]: I0128 14:15:04.867380 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d52ea87-50d4-46fc-a882-2a2966210069" path="/var/lib/kubelet/pods/9d52ea87-50d4-46fc-a882-2a2966210069/volumes" Jan 28 14:15:07 crc kubenswrapper[4848]: I0128 14:15:07.608435 4848 scope.go:117] "RemoveContainer" containerID="0ec6a5cd9b9a54715b8c38fc657f0bbf60bd62dc68d71b2e847fe9d27cfb84b3" Jan 28 14:15:19 crc kubenswrapper[4848]: I0128 14:15:19.850939 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:15:19 crc kubenswrapper[4848]: E0128 14:15:19.852163 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:15:31 crc kubenswrapper[4848]: I0128 14:15:31.851466 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:15:31 crc kubenswrapper[4848]: E0128 14:15:31.852693 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:15:46 crc kubenswrapper[4848]: I0128 14:15:46.851233 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:15:46 crc kubenswrapper[4848]: E0128 14:15:46.852317 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:16:00 crc kubenswrapper[4848]: I0128 14:16:00.851907 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:16:00 crc kubenswrapper[4848]: E0128 14:16:00.853201 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:16:15 crc kubenswrapper[4848]: I0128 14:16:15.849994 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:16:15 crc kubenswrapper[4848]: E0128 14:16:15.850658 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:16:30 crc kubenswrapper[4848]: I0128 14:16:30.850836 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:16:30 crc kubenswrapper[4848]: E0128 14:16:30.851835 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:16:45 crc kubenswrapper[4848]: I0128 14:16:45.850878 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:16:45 crc kubenswrapper[4848]: E0128 14:16:45.851712 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:16:58 crc kubenswrapper[4848]: I0128 14:16:58.851028 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:16:58 crc kubenswrapper[4848]: E0128 14:16:58.852711 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:17:10 crc kubenswrapper[4848]: I0128 14:17:10.851400 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:17:10 crc kubenswrapper[4848]: E0128 14:17:10.852808 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:17:22 crc kubenswrapper[4848]: I0128 14:17:22.849979 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:17:22 crc kubenswrapper[4848]: E0128 14:17:22.850728 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.494819 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hhvqc"] Jan 28 14:17:28 crc kubenswrapper[4848]: E0128 14:17:28.499725 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="540ea0a6-9ffb-41db-9092-03b3e7edd6be" containerName="collect-profiles" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.499766 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="540ea0a6-9ffb-41db-9092-03b3e7edd6be" containerName="collect-profiles" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.500051 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="540ea0a6-9ffb-41db-9092-03b3e7edd6be" 
containerName="collect-profiles" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.502004 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.535993 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hhvqc"] Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.540698 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9q27\" (UniqueName: \"kubernetes.io/projected/ed748ea6-7272-41dd-93a2-6df852332f51-kube-api-access-p9q27\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.540756 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-utilities\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.540853 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-catalog-content\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.644001 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9q27\" (UniqueName: \"kubernetes.io/projected/ed748ea6-7272-41dd-93a2-6df852332f51-kube-api-access-p9q27\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.644066 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-utilities\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.644169 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-catalog-content\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.644864 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-utilities\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.645183 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-catalog-content\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " 
pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.677586 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9q27\" (UniqueName: \"kubernetes.io/projected/ed748ea6-7272-41dd-93a2-6df852332f51-kube-api-access-p9q27\") pod \"certified-operators-hhvqc\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") " pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:28 crc kubenswrapper[4848]: I0128 14:17:28.827882 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhvqc" Jan 28 14:17:29 crc kubenswrapper[4848]: I0128 14:17:29.395039 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hhvqc"] Jan 28 14:17:30 crc kubenswrapper[4848]: I0128 14:17:30.192754 4848 generic.go:334] "Generic (PLEG): container finished" podID="ed748ea6-7272-41dd-93a2-6df852332f51" containerID="a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87" exitCode=0 Jan 28 14:17:30 crc kubenswrapper[4848]: I0128 14:17:30.192823 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerDied","Data":"a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87"} Jan 28 14:17:30 crc kubenswrapper[4848]: I0128 14:17:30.193362 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerStarted","Data":"8b58fc5e779f3d548050cd709c1a9f6f426f96113f03b52880ac492e98490b6c"} Jan 28 14:17:30 crc kubenswrapper[4848]: I0128 14:17:30.198782 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 14:17:31 crc kubenswrapper[4848]: I0128 14:17:31.208725 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerStarted","Data":"b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29"} Jan 28 14:17:32 crc kubenswrapper[4848]: I0128 14:17:32.228330 4848 generic.go:334] "Generic (PLEG): container finished" podID="ed748ea6-7272-41dd-93a2-6df852332f51" containerID="b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29" exitCode=0 Jan 28 14:17:32 crc kubenswrapper[4848]: I0128 14:17:32.228475 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerDied","Data":"b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29"} Jan 28 14:17:33 crc kubenswrapper[4848]: I0128 14:17:33.242970 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerStarted","Data":"46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227"} Jan 28 14:17:33 crc kubenswrapper[4848]: I0128 14:17:33.269554 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hhvqc" podStartSLOduration=2.59273762 podStartE2EDuration="5.269527224s" podCreationTimestamp="2026-01-28 14:17:28 +0000 UTC" firstStartedPulling="2026-01-28 14:17:30.198482359 +0000 UTC m=+5477.110699397" lastFinishedPulling="2026-01-28 14:17:32.875271963 
Jan 28 14:17:33 crc kubenswrapper[4848]: I0128 14:17:33.269554 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hhvqc" podStartSLOduration=2.59273762 podStartE2EDuration="5.269527224s" podCreationTimestamp="2026-01-28 14:17:28 +0000 UTC" firstStartedPulling="2026-01-28 14:17:30.198482359 +0000 UTC m=+5477.110699397" lastFinishedPulling="2026-01-28 14:17:32.875271963 +0000 UTC m=+5479.787489001" observedRunningTime="2026-01-28 14:17:33.262474414 +0000 UTC m=+5480.174691452" watchObservedRunningTime="2026-01-28 14:17:33.269527224 +0000 UTC m=+5480.181744262"
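The tracker entry above is internally consistent: podStartSLOduration appears to be the end-to-end startup time minus the image-pull window, i.e. 5.269527224s − (14:17:32.875271963 − 14:17:30.198482359 = 2.676789604s) = 2.59273762s, matching the logged value. A small Go check of that arithmetic using the logged timestamps:

```go
package main

import (
	"fmt"
	"time"
)

// Verifies the podStartSLOduration arithmetic from the tracker entry:
// SLO duration = E2E startup duration minus the image-pulling window.
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-28 14:17:28 +0000 UTC")
	firstPull := parse("2026-01-28 14:17:30.198482359 +0000 UTC")
	lastPull := parse("2026-01-28 14:17:32.875271963 +0000 UTC")
	running := parse("2026-01-28 14:17:33.269527224 +0000 UTC")

	e2e := running.Sub(created)          // 5.269527224s, the podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 2.59273762s, the podStartSLOduration
	fmt.Println(e2e, slo)
}
```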
Jan 28 14:17:36 crc kubenswrapper[4848]: I0128 14:17:36.850999 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:17:36 crc kubenswrapper[4848]: E0128 14:17:36.852104 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:17:38 crc kubenswrapper[4848]: I0128 14:17:38.828841 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hhvqc"
Jan 28 14:17:38 crc kubenswrapper[4848]: I0128 14:17:38.829409 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hhvqc"
Jan 28 14:17:38 crc kubenswrapper[4848]: I0128 14:17:38.892546 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hhvqc"
Jan 28 14:17:39 crc kubenswrapper[4848]: I0128 14:17:39.374909 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hhvqc"
Jan 28 14:17:39 crc kubenswrapper[4848]: I0128 14:17:39.439165 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hhvqc"]
Jan 28 14:17:41 crc kubenswrapper[4848]: I0128 14:17:41.348745 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hhvqc" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="registry-server" containerID="cri-o://46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227" gracePeriod=2
Jan 28 14:17:41 crc kubenswrapper[4848]: I0128 14:17:41.884545 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhvqc"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.002234 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9q27\" (UniqueName: \"kubernetes.io/projected/ed748ea6-7272-41dd-93a2-6df852332f51-kube-api-access-p9q27\") pod \"ed748ea6-7272-41dd-93a2-6df852332f51\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") "
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.002384 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-utilities\") pod \"ed748ea6-7272-41dd-93a2-6df852332f51\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") "
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.002468 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-catalog-content\") pod \"ed748ea6-7272-41dd-93a2-6df852332f51\" (UID: \"ed748ea6-7272-41dd-93a2-6df852332f51\") "
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.009123 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed748ea6-7272-41dd-93a2-6df852332f51-kube-api-access-p9q27" (OuterVolumeSpecName: "kube-api-access-p9q27") pod "ed748ea6-7272-41dd-93a2-6df852332f51" (UID: "ed748ea6-7272-41dd-93a2-6df852332f51"). InnerVolumeSpecName "kube-api-access-p9q27". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.012385 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-utilities" (OuterVolumeSpecName: "utilities") pod "ed748ea6-7272-41dd-93a2-6df852332f51" (UID: "ed748ea6-7272-41dd-93a2-6df852332f51"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.065928 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed748ea6-7272-41dd-93a2-6df852332f51" (UID: "ed748ea6-7272-41dd-93a2-6df852332f51"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.106022 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9q27\" (UniqueName: \"kubernetes.io/projected/ed748ea6-7272-41dd-93a2-6df852332f51-kube-api-access-p9q27\") on node \"crc\" DevicePath \"\""
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.106065 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.106081 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed748ea6-7272-41dd-93a2-6df852332f51-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.361431 4848 generic.go:334] "Generic (PLEG): container finished" podID="ed748ea6-7272-41dd-93a2-6df852332f51" containerID="46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227" exitCode=0
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.361519 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerDied","Data":"46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227"}
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.362503 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhvqc" event={"ID":"ed748ea6-7272-41dd-93a2-6df852332f51","Type":"ContainerDied","Data":"8b58fc5e779f3d548050cd709c1a9f6f426f96113f03b52880ac492e98490b6c"}
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.362545 4848 scope.go:117] "RemoveContainer" containerID="46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.361554 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhvqc"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.389493 4848 scope.go:117] "RemoveContainer" containerID="b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.414917 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hhvqc"]
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.421601 4848 scope.go:117] "RemoveContainer" containerID="a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.431672 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hhvqc"]
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.480627 4848 scope.go:117] "RemoveContainer" containerID="46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227"
Jan 28 14:17:42 crc kubenswrapper[4848]: E0128 14:17:42.481201 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227\": container with ID starting with 46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227 not found: ID does not exist" containerID="46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.481331 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227"} err="failed to get container status \"46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227\": rpc error: code = NotFound desc = could not find container \"46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227\": container with ID starting with 46c2be5ecd8977842f9eb3b7c7b1d3f3c6ccf603d8e2a4afd9a90af6fea71227 not found: ID does not exist"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.481424 4848 scope.go:117] "RemoveContainer" containerID="b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29"
Jan 28 14:17:42 crc kubenswrapper[4848]: E0128 14:17:42.481796 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29\": container with ID starting with b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29 not found: ID does not exist" containerID="b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.481856 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29"} err="failed to get container status \"b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29\": rpc error: code = NotFound desc = could not find container \"b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29\": container with ID starting with b7ee54e1ff20648bc8c328b6c1c80f81481d588ef1ac35ded2270160c2355c29 not found: ID does not exist"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.481911 4848 scope.go:117] "RemoveContainer" containerID="a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87"
Jan 28 14:17:42 crc kubenswrapper[4848]: E0128 14:17:42.482376 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87\": container with ID starting with a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87 not found: ID does not exist" containerID="a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.482407 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87"} err="failed to get container status \"a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87\": rpc error: code = NotFound desc = could not find container \"a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87\": container with ID starting with a6bbc5524043f0e82d1e846a1eb49f97c9701f3763175c61bbb87df796055e87 not found: ID does not exist"
Jan 28 14:17:42 crc kubenswrapper[4848]: I0128 14:17:42.867094 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" path="/var/lib/kubelet/pods/ed748ea6-7272-41dd-93a2-6df852332f51/volumes"
Jan 28 14:17:51 crc kubenswrapper[4848]: I0128 14:17:51.850879 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:17:51 crc kubenswrapper[4848]: E0128 14:17:51.852158 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:18:02 crc kubenswrapper[4848]: I0128 14:18:02.851318 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:18:02 crc kubenswrapper[4848]: E0128 14:18:02.852120 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:18:17 crc kubenswrapper[4848]: I0128 14:18:17.850357 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:18:17 crc kubenswrapper[4848]: E0128 14:18:17.851454 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:18:30 crc kubenswrapper[4848]: I0128 14:18:30.850741 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:18:30 crc kubenswrapper[4848]: E0128 14:18:30.852121 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.827703 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"] Jan 28 14:18:41 crc kubenswrapper[4848]: E0128 14:18:41.829292 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="extract-utilities" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.829316 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="extract-utilities" Jan 28 14:18:41 crc kubenswrapper[4848]: E0128 14:18:41.829367 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="extract-content" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.829377 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="extract-content" Jan 28 14:18:41 crc kubenswrapper[4848]: E0128 14:18:41.829413 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="registry-server" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.829422 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="registry-server" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.829713 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed748ea6-7272-41dd-93a2-6df852332f51" containerName="registry-server" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.834037 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:41 crc kubenswrapper[4848]: I0128 14:18:41.840171 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"] Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.000052 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-utilities\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.000174 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-catalog-content\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.000937 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgz2n\" (UniqueName: \"kubernetes.io/projected/89e0620f-240f-4f61-8630-67357f49a7f7-kube-api-access-dgz2n\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.103833 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-utilities\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.103896 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-catalog-content\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.104094 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgz2n\" (UniqueName: \"kubernetes.io/projected/89e0620f-240f-4f61-8630-67357f49a7f7-kube-api-access-dgz2n\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.104931 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-catalog-content\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.106690 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-utilities\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.133037 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dgz2n\" (UniqueName: \"kubernetes.io/projected/89e0620f-240f-4f61-8630-67357f49a7f7-kube-api-access-dgz2n\") pod \"redhat-operators-g96sp\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") " pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.177898 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:42 crc kubenswrapper[4848]: I0128 14:18:42.767909 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"] Jan 28 14:18:43 crc kubenswrapper[4848]: I0128 14:18:43.044646 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerStarted","Data":"ba685b69e9ae70cddd31ba94900f57a133b9b78270ed4101b71a74113a31698b"} Jan 28 14:18:44 crc kubenswrapper[4848]: I0128 14:18:44.061451 4848 generic.go:334] "Generic (PLEG): container finished" podID="89e0620f-240f-4f61-8630-67357f49a7f7" containerID="65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13" exitCode=0 Jan 28 14:18:44 crc kubenswrapper[4848]: I0128 14:18:44.061763 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerDied","Data":"65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13"} Jan 28 14:18:44 crc kubenswrapper[4848]: I0128 14:18:44.862565 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:18:44 crc kubenswrapper[4848]: E0128 14:18:44.862862 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:18:46 crc kubenswrapper[4848]: I0128 14:18:46.094498 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerStarted","Data":"e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00"} Jan 28 14:18:48 crc kubenswrapper[4848]: I0128 14:18:48.117851 4848 generic.go:334] "Generic (PLEG): container finished" podID="89e0620f-240f-4f61-8630-67357f49a7f7" containerID="e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00" exitCode=0 Jan 28 14:18:48 crc kubenswrapper[4848]: I0128 14:18:48.117977 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerDied","Data":"e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00"} Jan 28 14:18:49 crc kubenswrapper[4848]: I0128 14:18:49.134225 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerStarted","Data":"c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56"} Jan 28 14:18:49 crc kubenswrapper[4848]: I0128 14:18:49.160432 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-g96sp" podStartSLOduration=3.672994837 podStartE2EDuration="8.1604074s" podCreationTimestamp="2026-01-28 14:18:41 +0000 UTC" firstStartedPulling="2026-01-28 14:18:44.063376957 +0000 UTC m=+5550.975593995" lastFinishedPulling="2026-01-28 14:18:48.55078952 +0000 UTC m=+5555.463006558" observedRunningTime="2026-01-28 14:18:49.157686297 +0000 UTC m=+5556.069903355" watchObservedRunningTime="2026-01-28 14:18:49.1604074 +0000 UTC m=+5556.072624438" Jan 28 14:18:52 crc kubenswrapper[4848]: I0128 14:18:52.179131 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:52 crc kubenswrapper[4848]: I0128 14:18:52.179760 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:18:53 crc kubenswrapper[4848]: I0128 14:18:53.241081 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g96sp" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="registry-server" probeResult="failure" output=< Jan 28 14:18:53 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 14:18:53 crc kubenswrapper[4848]: > Jan 28 14:18:55 crc kubenswrapper[4848]: I0128 14:18:55.850743 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:18:55 crc kubenswrapper[4848]: E0128 14:18:55.851190 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:19:02 crc kubenswrapper[4848]: I0128 14:19:02.244429 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:19:02 crc kubenswrapper[4848]: I0128 14:19:02.310324 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g96sp" Jan 28 14:19:02 crc kubenswrapper[4848]: I0128 14:19:02.489425 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"] Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.294700 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-g96sp" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="registry-server" containerID="cri-o://c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56" gracePeriod=2 Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.827731 4848 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 14:18:55 crc kubenswrapper[4848]: I0128 14:18:55.850743 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:18:55 crc kubenswrapper[4848]: E0128 14:18:55.851190 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:19:02 crc kubenswrapper[4848]: I0128 14:19:02.244429 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g96sp"
Jan 28 14:19:02 crc kubenswrapper[4848]: I0128 14:19:02.310324 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g96sp"
Jan 28 14:19:02 crc kubenswrapper[4848]: I0128 14:19:02.489425 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"]
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.294700 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-g96sp" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="registry-server" containerID="cri-o://c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56" gracePeriod=2
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.827731 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g96sp"
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.916596 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-catalog-content\") pod \"89e0620f-240f-4f61-8630-67357f49a7f7\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") "
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.916768 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgz2n\" (UniqueName: \"kubernetes.io/projected/89e0620f-240f-4f61-8630-67357f49a7f7-kube-api-access-dgz2n\") pod \"89e0620f-240f-4f61-8630-67357f49a7f7\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") "
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.916847 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-utilities\") pod \"89e0620f-240f-4f61-8630-67357f49a7f7\" (UID: \"89e0620f-240f-4f61-8630-67357f49a7f7\") "
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.918057 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-utilities" (OuterVolumeSpecName: "utilities") pod "89e0620f-240f-4f61-8630-67357f49a7f7" (UID: "89e0620f-240f-4f61-8630-67357f49a7f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:19:03 crc kubenswrapper[4848]: I0128 14:19:03.939525 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89e0620f-240f-4f61-8630-67357f49a7f7-kube-api-access-dgz2n" (OuterVolumeSpecName: "kube-api-access-dgz2n") pod "89e0620f-240f-4f61-8630-67357f49a7f7" (UID: "89e0620f-240f-4f61-8630-67357f49a7f7"). InnerVolumeSpecName "kube-api-access-dgz2n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.020363 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgz2n\" (UniqueName: \"kubernetes.io/projected/89e0620f-240f-4f61-8630-67357f49a7f7-kube-api-access-dgz2n\") on node \"crc\" DevicePath \"\""
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.020396 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.057683 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89e0620f-240f-4f61-8630-67357f49a7f7" (UID: "89e0620f-240f-4f61-8630-67357f49a7f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.122827 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89e0620f-240f-4f61-8630-67357f49a7f7-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.306646 4848 generic.go:334] "Generic (PLEG): container finished" podID="89e0620f-240f-4f61-8630-67357f49a7f7" containerID="c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56" exitCode=0
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.306699 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerDied","Data":"c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56"}
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.306730 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g96sp" event={"ID":"89e0620f-240f-4f61-8630-67357f49a7f7","Type":"ContainerDied","Data":"ba685b69e9ae70cddd31ba94900f57a133b9b78270ed4101b71a74113a31698b"}
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.306740 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g96sp"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.306749 4848 scope.go:117] "RemoveContainer" containerID="c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.342410 4848 scope.go:117] "RemoveContainer" containerID="e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.356567 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"]
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.381118 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-g96sp"]
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.384346 4848 scope.go:117] "RemoveContainer" containerID="65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.437537 4848 scope.go:117] "RemoveContainer" containerID="c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56"
Jan 28 14:19:04 crc kubenswrapper[4848]: E0128 14:19:04.438287 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56\": container with ID starting with c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56 not found: ID does not exist" containerID="c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.438345 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56"} err="failed to get container status \"c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56\": rpc error: code = NotFound desc = could not find container \"c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56\": container with ID starting with c1183530a42ba6a0666158259018e2c13e852b496f152691a7d4b5088d4dcb56 not found: ID does not exist"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.438371 4848 scope.go:117] "RemoveContainer" containerID="e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00"
Jan 28 14:19:04 crc kubenswrapper[4848]: E0128 14:19:04.439141 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00\": container with ID starting with e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00 not found: ID does not exist" containerID="e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.439277 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00"} err="failed to get container status \"e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00\": rpc error: code = NotFound desc = could not find container \"e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00\": container with ID starting with e8a686b72333df01228c527426452294dbd46e95a4ae2c1dc77aff5c5ea66b00 not found: ID does not exist"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.442656 4848 scope.go:117] "RemoveContainer" containerID="65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13"
Jan 28 14:19:04 crc kubenswrapper[4848]: E0128 14:19:04.444727 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13\": container with ID starting with 65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13 not found: ID does not exist" containerID="65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.444791 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13"} err="failed to get container status \"65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13\": rpc error: code = NotFound desc = could not find container \"65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13\": container with ID starting with 65b8eb7c70d21b3a431fdfc80afd87cfc166f8304c6771608758dfae99c4ac13 not found: ID does not exist"
Jan 28 14:19:04 crc kubenswrapper[4848]: I0128 14:19:04.871795 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" path="/var/lib/kubelet/pods/89e0620f-240f-4f61-8630-67357f49a7f7/volumes"
Jan 28 14:19:07 crc kubenswrapper[4848]: I0128 14:19:07.851916 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:19:07 crc kubenswrapper[4848]: E0128 14:19:07.852897 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:19:19 crc kubenswrapper[4848]: I0128 14:19:19.850681 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
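Each "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pair above is a benign race: the kubelet re-issues RemoveContainer for IDs the runtime has already deleted, and a NotFound status simply means the cleanup is already done. A sketch of that idempotent treatment of CRI errors (hypothetical helper, not kubelet source):

```go
package sketch

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// alreadyGone reports whether a CRI gRPC error just means the container
// was removed earlier, so a ContainerStatus/RemoveContainer retry can be
// treated as a no-op instead of a failure.
func alreadyGone(err error) bool {
	return status.Code(err) == codes.NotFound
}
```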
Jan 28 14:19:19 crc kubenswrapper[4848]: E0128 14:19:19.852002 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:19:32 crc kubenswrapper[4848]: I0128 14:19:32.850381 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:19:32 crc kubenswrapper[4848]: E0128 14:19:32.853508 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:19:47 crc kubenswrapper[4848]: I0128 14:19:47.852959 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7"
Jan 28 14:19:48 crc kubenswrapper[4848]: I0128 14:19:48.864972 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"85ce7578267f96fcfe0593811bd362f1bfede91461e853c955b4ba8d74952d50"}
Jan 28 14:21:05 crc kubenswrapper[4848]: I0128 14:21:05.996170 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r4qzt"]
Jan 28 14:21:05 crc kubenswrapper[4848]: E0128 14:21:05.999465 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="extract-utilities"
Jan 28 14:21:05 crc kubenswrapper[4848]: I0128 14:21:05.999697 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="extract-utilities"
Jan 28 14:21:05 crc kubenswrapper[4848]: E0128 14:21:05.999850 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="registry-server"
Jan 28 14:21:05 crc kubenswrapper[4848]: I0128 14:21:05.999927 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="registry-server"
Jan 28 14:21:06 crc kubenswrapper[4848]: E0128 14:21:06.000023 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="extract-content"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.000096 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="extract-content"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.000500 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="89e0620f-240f-4f61-8630-67357f49a7f7" containerName="registry-server"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.002531 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.019910 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r4qzt"]
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.024201 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w9q7\" (UniqueName: \"kubernetes.io/projected/a42140ff-9e30-45bc-8183-14349993db24-kube-api-access-4w9q7\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.024395 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-catalog-content\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.024446 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-utilities\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.127037 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-utilities\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.127520 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w9q7\" (UniqueName: \"kubernetes.io/projected/a42140ff-9e30-45bc-8183-14349993db24-kube-api-access-4w9q7\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.127593 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-utilities\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.127684 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-catalog-content\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.128098 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-catalog-content\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.158413 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w9q7\" (UniqueName: \"kubernetes.io/projected/a42140ff-9e30-45bc-8183-14349993db24-kube-api-access-4w9q7\") pod \"community-operators-r4qzt\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.380204 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r4qzt"
Jan 28 14:21:06 crc kubenswrapper[4848]: I0128 14:21:06.942209 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r4qzt"]
Jan 28 14:21:07 crc kubenswrapper[4848]: I0128 14:21:07.838693 4848 generic.go:334] "Generic (PLEG): container finished" podID="a42140ff-9e30-45bc-8183-14349993db24" containerID="62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927" exitCode=0
Jan 28 14:21:07 crc kubenswrapper[4848]: I0128 14:21:07.838827 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerDied","Data":"62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927"}
Jan 28 14:21:07 crc kubenswrapper[4848]: I0128 14:21:07.839115 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerStarted","Data":"a4b019ab09d62062cd040cf8e114a7dad13719dfb186d79f13a163e4cb446059"}
Jan 28 14:21:08 crc kubenswrapper[4848]: I0128 14:21:08.863388 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerStarted","Data":"3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d"}
Jan 28 14:21:10 crc kubenswrapper[4848]: I0128 14:21:10.869799 4848 generic.go:334] "Generic (PLEG): container finished" podID="a42140ff-9e30-45bc-8183-14349993db24" containerID="3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d" exitCode=0
Jan 28 14:21:10 crc kubenswrapper[4848]: I0128 14:21:10.869874 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerDied","Data":"3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d"}
Jan 28 14:21:11 crc kubenswrapper[4848]: I0128 14:21:11.886114 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerStarted","Data":"10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f"}
Jan 28 14:21:11 crc kubenswrapper[4848]: I0128 14:21:11.915611 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r4qzt" podStartSLOduration=3.508415823 podStartE2EDuration="6.915582869s" podCreationTimestamp="2026-01-28 14:21:05 +0000 UTC" firstStartedPulling="2026-01-28 14:21:07.841344572 +0000 UTC m=+5694.753561610" lastFinishedPulling="2026-01-28 14:21:11.248511608 +0000 UTC m=+5698.160728656" observedRunningTime="2026-01-28 14:21:11.903576797 +0000 UTC m=+5698.815793835" watchObservedRunningTime="2026-01-28 14:21:11.915582869 +0000 UTC m=+5698.827799917"
status="unhealthy" pod="openshift-marketplace/community-operators-r4qzt" Jan 28 14:21:16 crc kubenswrapper[4848]: I0128 14:21:16.382603 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r4qzt" Jan 28 14:21:16 crc kubenswrapper[4848]: I0128 14:21:16.444743 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r4qzt" Jan 28 14:21:16 crc kubenswrapper[4848]: I0128 14:21:16.998524 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r4qzt" Jan 28 14:21:17 crc kubenswrapper[4848]: I0128 14:21:17.053578 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r4qzt"] Jan 28 14:21:18 crc kubenswrapper[4848]: I0128 14:21:18.971487 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r4qzt" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="registry-server" containerID="cri-o://10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f" gracePeriod=2 Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.518739 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r4qzt" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.659799 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4w9q7\" (UniqueName: \"kubernetes.io/projected/a42140ff-9e30-45bc-8183-14349993db24-kube-api-access-4w9q7\") pod \"a42140ff-9e30-45bc-8183-14349993db24\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.659934 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-catalog-content\") pod \"a42140ff-9e30-45bc-8183-14349993db24\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.660009 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-utilities\") pod \"a42140ff-9e30-45bc-8183-14349993db24\" (UID: \"a42140ff-9e30-45bc-8183-14349993db24\") " Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.661056 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-utilities" (OuterVolumeSpecName: "utilities") pod "a42140ff-9e30-45bc-8183-14349993db24" (UID: "a42140ff-9e30-45bc-8183-14349993db24"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.672715 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a42140ff-9e30-45bc-8183-14349993db24-kube-api-access-4w9q7" (OuterVolumeSpecName: "kube-api-access-4w9q7") pod "a42140ff-9e30-45bc-8183-14349993db24" (UID: "a42140ff-9e30-45bc-8183-14349993db24"). InnerVolumeSpecName "kube-api-access-4w9q7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.721302 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a42140ff-9e30-45bc-8183-14349993db24" (UID: "a42140ff-9e30-45bc-8183-14349993db24"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.763558 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4w9q7\" (UniqueName: \"kubernetes.io/projected/a42140ff-9e30-45bc-8183-14349993db24-kube-api-access-4w9q7\") on node \"crc\" DevicePath \"\"" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.763614 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.763631 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a42140ff-9e30-45bc-8183-14349993db24-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.984483 4848 generic.go:334] "Generic (PLEG): container finished" podID="a42140ff-9e30-45bc-8183-14349993db24" containerID="10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f" exitCode=0 Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.984547 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r4qzt" Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.984567 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerDied","Data":"10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f"} Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.984620 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r4qzt" event={"ID":"a42140ff-9e30-45bc-8183-14349993db24","Type":"ContainerDied","Data":"a4b019ab09d62062cd040cf8e114a7dad13719dfb186d79f13a163e4cb446059"} Jan 28 14:21:19 crc kubenswrapper[4848]: I0128 14:21:19.984647 4848 scope.go:117] "RemoveContainer" containerID="10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.032508 4848 scope.go:117] "RemoveContainer" containerID="3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.042226 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r4qzt"] Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.054015 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r4qzt"] Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.074960 4848 scope.go:117] "RemoveContainer" containerID="62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.124670 4848 scope.go:117] "RemoveContainer" containerID="10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f" Jan 28 14:21:20 crc kubenswrapper[4848]: E0128 14:21:20.125550 4848 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f\": container with ID starting with 10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f not found: ID does not exist" containerID="10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.125585 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f"} err="failed to get container status \"10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f\": rpc error: code = NotFound desc = could not find container \"10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f\": container with ID starting with 10aeb336b06db9f575803f61f60d4fa592b009694bf36b708516f377a752cc2f not found: ID does not exist" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.125615 4848 scope.go:117] "RemoveContainer" containerID="3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d" Jan 28 14:21:20 crc kubenswrapper[4848]: E0128 14:21:20.125882 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d\": container with ID starting with 3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d not found: ID does not exist" containerID="3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.125917 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d"} err="failed to get container status \"3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d\": rpc error: code = NotFound desc = could not find container \"3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d\": container with ID starting with 3dea03156c02a932d57899299d7372e9cf21572f57b924a2c09eb3aebf01473d not found: ID does not exist" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.125937 4848 scope.go:117] "RemoveContainer" containerID="62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927" Jan 28 14:21:20 crc kubenswrapper[4848]: E0128 14:21:20.126592 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927\": container with ID starting with 62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927 not found: ID does not exist" containerID="62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.126620 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927"} err="failed to get container status \"62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927\": rpc error: code = NotFound desc = could not find container \"62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927\": container with ID starting with 62f410e5d06825b39cc69f0dbf6e8d4aa8307c5a86288bf3426e804f8f80c927 not found: ID does not exist" Jan 28 14:21:20 crc kubenswrapper[4848]: I0128 14:21:20.862959 4848 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="a42140ff-9e30-45bc-8183-14349993db24" path="/var/lib/kubelet/pods/a42140ff-9e30-45bc-8183-14349993db24/volumes" Jan 28 14:22:07 crc kubenswrapper[4848]: I0128 14:22:07.924922 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:22:07 crc kubenswrapper[4848]: I0128 14:22:07.926777 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.387334 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j9xfq"] Jan 28 14:22:24 crc kubenswrapper[4848]: E0128 14:22:24.390437 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="extract-utilities" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.390586 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="extract-utilities" Jan 28 14:22:24 crc kubenswrapper[4848]: E0128 14:22:24.390715 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="extract-content" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.390777 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="extract-content" Jan 28 14:22:24 crc kubenswrapper[4848]: E0128 14:22:24.390848 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="registry-server" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.390904 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="registry-server" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.391205 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="a42140ff-9e30-45bc-8183-14349993db24" containerName="registry-server" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.393460 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.401082 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9xfq"] Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.476927 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-utilities\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.477113 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2cm5\" (UniqueName: \"kubernetes.io/projected/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-kube-api-access-m2cm5\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.477198 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-catalog-content\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.580853 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-utilities\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.581482 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2cm5\" (UniqueName: \"kubernetes.io/projected/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-kube-api-access-m2cm5\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.581525 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-catalog-content\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.581548 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-utilities\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.581979 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-catalog-content\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.615873 4848 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-m2cm5\" (UniqueName: \"kubernetes.io/projected/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-kube-api-access-m2cm5\") pod \"redhat-marketplace-j9xfq\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:24 crc kubenswrapper[4848]: I0128 14:22:24.734180 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:25 crc kubenswrapper[4848]: I0128 14:22:25.281315 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9xfq"] Jan 28 14:22:25 crc kubenswrapper[4848]: W0128 14:22:25.304938 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod967f4ec0_d50e_4aa5_8b54_fcc7ee2b7de2.slice/crio-2a9a83743b6eea131e6858b95d1bf6cc9868242e9d5f80612c9e7c8f6e743f25 WatchSource:0}: Error finding container 2a9a83743b6eea131e6858b95d1bf6cc9868242e9d5f80612c9e7c8f6e743f25: Status 404 returned error can't find the container with id 2a9a83743b6eea131e6858b95d1bf6cc9868242e9d5f80612c9e7c8f6e743f25 Jan 28 14:22:25 crc kubenswrapper[4848]: I0128 14:22:25.776556 4848 generic.go:334] "Generic (PLEG): container finished" podID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerID="3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333" exitCode=0 Jan 28 14:22:25 crc kubenswrapper[4848]: I0128 14:22:25.776619 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerDied","Data":"3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333"} Jan 28 14:22:25 crc kubenswrapper[4848]: I0128 14:22:25.776922 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerStarted","Data":"2a9a83743b6eea131e6858b95d1bf6cc9868242e9d5f80612c9e7c8f6e743f25"} Jan 28 14:22:26 crc kubenswrapper[4848]: I0128 14:22:26.790891 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerStarted","Data":"220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553"} Jan 28 14:22:27 crc kubenswrapper[4848]: I0128 14:22:27.808712 4848 generic.go:334] "Generic (PLEG): container finished" podID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerID="220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553" exitCode=0 Jan 28 14:22:27 crc kubenswrapper[4848]: I0128 14:22:27.808861 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerDied","Data":"220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553"} Jan 28 14:22:28 crc kubenswrapper[4848]: I0128 14:22:28.824232 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerStarted","Data":"8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e"} Jan 28 14:22:28 crc kubenswrapper[4848]: I0128 14:22:28.853949 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j9xfq" podStartSLOduration=2.38139 
podStartE2EDuration="4.853918544s" podCreationTimestamp="2026-01-28 14:22:24 +0000 UTC" firstStartedPulling="2026-01-28 14:22:25.778923476 +0000 UTC m=+5772.691140514" lastFinishedPulling="2026-01-28 14:22:28.25145202 +0000 UTC m=+5775.163669058" observedRunningTime="2026-01-28 14:22:28.845655558 +0000 UTC m=+5775.757872606" watchObservedRunningTime="2026-01-28 14:22:28.853918544 +0000 UTC m=+5775.766135582" Jan 28 14:22:34 crc kubenswrapper[4848]: I0128 14:22:34.734461 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:34 crc kubenswrapper[4848]: I0128 14:22:34.736175 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:35 crc kubenswrapper[4848]: I0128 14:22:35.057946 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:35 crc kubenswrapper[4848]: I0128 14:22:35.985675 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:36 crc kubenswrapper[4848]: I0128 14:22:36.966769 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9xfq"] Jan 28 14:22:37 crc kubenswrapper[4848]: I0128 14:22:37.924199 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:22:37 crc kubenswrapper[4848]: I0128 14:22:37.924291 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:22:37 crc kubenswrapper[4848]: I0128 14:22:37.930642 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j9xfq" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="registry-server" containerID="cri-o://8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e" gracePeriod=2 Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.580603 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.676913 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-utilities\") pod \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.677149 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2cm5\" (UniqueName: \"kubernetes.io/projected/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-kube-api-access-m2cm5\") pod \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.677411 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-catalog-content\") pod \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\" (UID: \"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2\") " Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.677905 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-utilities" (OuterVolumeSpecName: "utilities") pod "967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" (UID: "967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.678433 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.684426 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-kube-api-access-m2cm5" (OuterVolumeSpecName: "kube-api-access-m2cm5") pod "967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" (UID: "967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2"). InnerVolumeSpecName "kube-api-access-m2cm5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.700193 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" (UID: "967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.780470 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.780526 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2cm5\" (UniqueName: \"kubernetes.io/projected/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2-kube-api-access-m2cm5\") on node \"crc\" DevicePath \"\"" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.944021 4848 generic.go:334] "Generic (PLEG): container finished" podID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerID="8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e" exitCode=0 Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.944071 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerDied","Data":"8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e"} Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.944102 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9xfq" event={"ID":"967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2","Type":"ContainerDied","Data":"2a9a83743b6eea131e6858b95d1bf6cc9868242e9d5f80612c9e7c8f6e743f25"} Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.944123 4848 scope.go:117] "RemoveContainer" containerID="8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.944124 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j9xfq" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.976491 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9xfq"] Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.988585 4848 scope.go:117] "RemoveContainer" containerID="220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553" Jan 28 14:22:38 crc kubenswrapper[4848]: I0128 14:22:38.989435 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9xfq"] Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.024376 4848 scope.go:117] "RemoveContainer" containerID="3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333" Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.076298 4848 scope.go:117] "RemoveContainer" containerID="8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e" Jan 28 14:22:39 crc kubenswrapper[4848]: E0128 14:22:39.076978 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e\": container with ID starting with 8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e not found: ID does not exist" containerID="8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e" Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.077023 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e"} err="failed to get container status \"8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e\": rpc error: code = NotFound desc = could not find container \"8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e\": container with ID starting with 8b7255ebf4dc6baa2ac7ad3694a46c320fdce5498bc3cfdf1276fe97c793e35e not found: ID does not exist" Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.077049 4848 scope.go:117] "RemoveContainer" containerID="220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553" Jan 28 14:22:39 crc kubenswrapper[4848]: E0128 14:22:39.077571 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553\": container with ID starting with 220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553 not found: ID does not exist" containerID="220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553" Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.077751 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553"} err="failed to get container status \"220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553\": rpc error: code = NotFound desc = could not find container \"220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553\": container with ID starting with 220f095817b7b4f868dbfd46f366292a5998c679dec6bcf0725c3f8a96240553 not found: ID does not exist" Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.077917 4848 scope.go:117] "RemoveContainer" containerID="3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333" Jan 28 14:22:39 crc kubenswrapper[4848]: E0128 14:22:39.078494 4848 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333\": container with ID starting with 3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333 not found: ID does not exist" containerID="3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333" Jan 28 14:22:39 crc kubenswrapper[4848]: I0128 14:22:39.078524 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333"} err="failed to get container status \"3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333\": rpc error: code = NotFound desc = could not find container \"3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333\": container with ID starting with 3111847e123ed3c5ec69232c8e87f13ab61363152826bfe027c9958ffa286333 not found: ID does not exist" Jan 28 14:22:40 crc kubenswrapper[4848]: I0128 14:22:40.868746 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" path="/var/lib/kubelet/pods/967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2/volumes" Jan 28 14:23:07 crc kubenswrapper[4848]: I0128 14:23:07.924803 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:23:07 crc kubenswrapper[4848]: I0128 14:23:07.925452 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:23:07 crc kubenswrapper[4848]: I0128 14:23:07.925514 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:23:07 crc kubenswrapper[4848]: I0128 14:23:07.926226 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85ce7578267f96fcfe0593811bd362f1bfede91461e853c955b4ba8d74952d50"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:23:07 crc kubenswrapper[4848]: I0128 14:23:07.926324 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://85ce7578267f96fcfe0593811bd362f1bfede91461e853c955b4ba8d74952d50" gracePeriod=600 Jan 28 14:23:08 crc kubenswrapper[4848]: I0128 14:23:08.273293 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="85ce7578267f96fcfe0593811bd362f1bfede91461e853c955b4ba8d74952d50" exitCode=0 Jan 28 14:23:08 crc kubenswrapper[4848]: I0128 14:23:08.273355 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" 
event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"85ce7578267f96fcfe0593811bd362f1bfede91461e853c955b4ba8d74952d50"} Jan 28 14:23:08 crc kubenswrapper[4848]: I0128 14:23:08.273592 4848 scope.go:117] "RemoveContainer" containerID="1f32540ac26c91b078724ef1b674f8a1ed4d7030b2f6e2f30f892c470e10b7e7" Jan 28 14:23:09 crc kubenswrapper[4848]: I0128 14:23:09.285581 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"} Jan 28 14:23:48 crc kubenswrapper[4848]: I0128 14:23:48.763558 4848 generic.go:334] "Generic (PLEG): container finished" podID="08f6c3e6-eb26-471d-947f-11cb5533c6c8" containerID="905eac889a53bd9fbe0fcf3d280b37b631b45b4f997b876dd49d9a4db45cd054" exitCode=0 Jan 28 14:23:48 crc kubenswrapper[4848]: I0128 14:23:48.763653 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"08f6c3e6-eb26-471d-947f-11cb5533c6c8","Type":"ContainerDied","Data":"905eac889a53bd9fbe0fcf3d280b37b631b45b4f997b876dd49d9a4db45cd054"} Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.295581 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.441551 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ssh-key\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.441661 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ca-certs\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.441687 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-config-data\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.441744 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config-secret\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.441799 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.441928 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-workdir\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc 
kubenswrapper[4848]: I0128 14:23:50.441987 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.442059 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-temporary\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.442093 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6p7t\" (UniqueName: \"kubernetes.io/projected/08f6c3e6-eb26-471d-947f-11cb5533c6c8-kube-api-access-z6p7t\") pod \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\" (UID: \"08f6c3e6-eb26-471d-947f-11cb5533c6c8\") " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.443134 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-config-data" (OuterVolumeSpecName: "config-data") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.443665 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.451451 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.456669 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "test-operator-logs") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.457632 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08f6c3e6-eb26-471d-947f-11cb5533c6c8-kube-api-access-z6p7t" (OuterVolumeSpecName: "kube-api-access-z6p7t") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "kube-api-access-z6p7t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.485554 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.485590 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.486551 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.512760 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "08f6c3e6-eb26-471d-947f-11cb5533c6c8" (UID: "08f6c3e6-eb26-471d-947f-11cb5533c6c8"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546113 4848 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546476 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546534 4848 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/08f6c3e6-eb26-471d-947f-11cb5533c6c8-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546551 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6p7t\" (UniqueName: \"kubernetes.io/projected/08f6c3e6-eb26-471d-947f-11cb5533c6c8-kube-api-access-z6p7t\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546567 4848 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546584 4848 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-ca-certs\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546595 4848 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08f6c3e6-eb26-471d-947f-11cb5533c6c8-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546607 4848 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08f6c3e6-eb26-471d-947f-11cb5533c6c8-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.546677 4848 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.572567 4848 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.649201 4848 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.804075 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"08f6c3e6-eb26-471d-947f-11cb5533c6c8","Type":"ContainerDied","Data":"40a936dbf61fdecb41626cfe31499aa7382f09aea2977b32056f8117792369c2"} Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.804150 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40a936dbf61fdecb41626cfe31499aa7382f09aea2977b32056f8117792369c2" Jan 28 14:23:50 crc kubenswrapper[4848]: I0128 14:23:50.804199 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.102994 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 28 14:24:00 crc kubenswrapper[4848]: E0128 14:24:00.104888 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08f6c3e6-eb26-471d-947f-11cb5533c6c8" containerName="tempest-tests-tempest-tests-runner" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.104916 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="08f6c3e6-eb26-471d-947f-11cb5533c6c8" containerName="tempest-tests-tempest-tests-runner" Jan 28 14:24:00 crc kubenswrapper[4848]: E0128 14:24:00.104935 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="extract-utilities" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.104946 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="extract-utilities" Jan 28 14:24:00 crc kubenswrapper[4848]: E0128 14:24:00.104986 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="registry-server" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.104995 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="registry-server" Jan 28 14:24:00 crc kubenswrapper[4848]: E0128 14:24:00.105031 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="extract-content" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.105040 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="extract-content" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.105377 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="08f6c3e6-eb26-471d-947f-11cb5533c6c8" containerName="tempest-tests-tempest-tests-runner" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.105413 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="967f4ec0-d50e-4aa5-8b54-fcc7ee2b7de2" containerName="registry-server" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.106768 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.115121 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-4tj7f" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.115893 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.213532 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.213666 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmstp\" (UniqueName: \"kubernetes.io/projected/f8682e28-9944-4b82-b3d0-f6e6eca96b93-kube-api-access-gmstp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.315678 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.315774 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmstp\" (UniqueName: \"kubernetes.io/projected/f8682e28-9944-4b82-b3d0-f6e6eca96b93-kube-api-access-gmstp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.316467 4848 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.345990 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmstp\" (UniqueName: \"kubernetes.io/projected/f8682e28-9944-4b82-b3d0-f6e6eca96b93-kube-api-access-gmstp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.357554 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"f8682e28-9944-4b82-b3d0-f6e6eca96b93\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc 
kubenswrapper[4848]: I0128 14:24:00.444206 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.774007 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 28 14:24:00 crc kubenswrapper[4848]: W0128 14:24:00.785140 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8682e28_9944_4b82_b3d0_f6e6eca96b93.slice/crio-a65e8f7f9d66d334020af08b953f1b519da2d4d03aaf15750778e1f180b2a702 WatchSource:0}: Error finding container a65e8f7f9d66d334020af08b953f1b519da2d4d03aaf15750778e1f180b2a702: Status 404 returned error can't find the container with id a65e8f7f9d66d334020af08b953f1b519da2d4d03aaf15750778e1f180b2a702 Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.790668 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 14:24:00 crc kubenswrapper[4848]: I0128 14:24:00.951441 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"f8682e28-9944-4b82-b3d0-f6e6eca96b93","Type":"ContainerStarted","Data":"a65e8f7f9d66d334020af08b953f1b519da2d4d03aaf15750778e1f180b2a702"} Jan 28 14:24:02 crc kubenswrapper[4848]: I0128 14:24:02.982822 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"f8682e28-9944-4b82-b3d0-f6e6eca96b93","Type":"ContainerStarted","Data":"eeaba5ce85b78a602b5e7b79239bbb548c59ad816bd5a2c409416020ae07e5f1"} Jan 28 14:24:03 crc kubenswrapper[4848]: I0128 14:24:03.004986 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.032598219 podStartE2EDuration="3.004962089s" podCreationTimestamp="2026-01-28 14:24:00 +0000 UTC" firstStartedPulling="2026-01-28 14:24:00.790334203 +0000 UTC m=+5867.702551241" lastFinishedPulling="2026-01-28 14:24:01.762698073 +0000 UTC m=+5868.674915111" observedRunningTime="2026-01-28 14:24:03.002765709 +0000 UTC m=+5869.914982767" watchObservedRunningTime="2026-01-28 14:24:03.004962089 +0000 UTC m=+5869.917179127" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.712829 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-29w6z/must-gather-wb4fr"] Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.715283 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.717436 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-29w6z"/"kube-root-ca.crt" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.717827 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-29w6z"/"default-dockercfg-pc4sb" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.718035 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-29w6z"/"openshift-service-ca.crt" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.729003 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-29w6z/must-gather-wb4fr"] Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.772695 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-must-gather-output\") pod \"must-gather-wb4fr\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.772757 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn5z2\" (UniqueName: \"kubernetes.io/projected/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-kube-api-access-sn5z2\") pod \"must-gather-wb4fr\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.876111 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-must-gather-output\") pod \"must-gather-wb4fr\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.876160 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn5z2\" (UniqueName: \"kubernetes.io/projected/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-kube-api-access-sn5z2\") pod \"must-gather-wb4fr\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.877779 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-must-gather-output\") pod \"must-gather-wb4fr\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:27 crc kubenswrapper[4848]: I0128 14:24:27.897875 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn5z2\" (UniqueName: \"kubernetes.io/projected/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-kube-api-access-sn5z2\") pod \"must-gather-wb4fr\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:28 crc kubenswrapper[4848]: I0128 14:24:28.038238 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:24:28 crc kubenswrapper[4848]: I0128 14:24:28.840874 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-29w6z/must-gather-wb4fr"] Jan 28 14:24:29 crc kubenswrapper[4848]: I0128 14:24:29.328836 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/must-gather-wb4fr" event={"ID":"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760","Type":"ContainerStarted","Data":"cff0e699f1299cc62ac8889f3fc5da8ff3de2f09d6bbc3a881b07e43598642b2"} Jan 28 14:24:38 crc kubenswrapper[4848]: I0128 14:24:38.442643 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/must-gather-wb4fr" event={"ID":"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760","Type":"ContainerStarted","Data":"21132d3d2d36017e29e4a4b15892a72e6dd3827d7c62b837d01d41f77b30b20c"} Jan 28 14:24:38 crc kubenswrapper[4848]: I0128 14:24:38.443616 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/must-gather-wb4fr" event={"ID":"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760","Type":"ContainerStarted","Data":"5c9941bb402897ef4e6a4e943aad058d65b90eb5240e7d8bcec35f872573f084"} Jan 28 14:24:38 crc kubenswrapper[4848]: I0128 14:24:38.470135 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-29w6z/must-gather-wb4fr" podStartSLOduration=2.871542644 podStartE2EDuration="11.470104704s" podCreationTimestamp="2026-01-28 14:24:27 +0000 UTC" firstStartedPulling="2026-01-28 14:24:28.846120357 +0000 UTC m=+5895.758337395" lastFinishedPulling="2026-01-28 14:24:37.444682417 +0000 UTC m=+5904.356899455" observedRunningTime="2026-01-28 14:24:38.458716114 +0000 UTC m=+5905.370933172" watchObservedRunningTime="2026-01-28 14:24:38.470104704 +0000 UTC m=+5905.382321752" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.220414 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-29w6z/crc-debug-xqwt4"] Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.222367 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.382782 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c26a9af-3b47-4555-8cc9-1ce23eee9915-host\") pod \"crc-debug-xqwt4\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") " pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.382903 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qc8l\" (UniqueName: \"kubernetes.io/projected/1c26a9af-3b47-4555-8cc9-1ce23eee9915-kube-api-access-9qc8l\") pod \"crc-debug-xqwt4\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") " pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.486162 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c26a9af-3b47-4555-8cc9-1ce23eee9915-host\") pod \"crc-debug-xqwt4\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") " pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.486594 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qc8l\" (UniqueName: \"kubernetes.io/projected/1c26a9af-3b47-4555-8cc9-1ce23eee9915-kube-api-access-9qc8l\") pod \"crc-debug-xqwt4\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") " pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.486396 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c26a9af-3b47-4555-8cc9-1ce23eee9915-host\") pod \"crc-debug-xqwt4\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") " pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.511208 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qc8l\" (UniqueName: \"kubernetes.io/projected/1c26a9af-3b47-4555-8cc9-1ce23eee9915-kube-api-access-9qc8l\") pod \"crc-debug-xqwt4\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") " pod="openshift-must-gather-29w6z/crc-debug-xqwt4" Jan 28 14:24:42 crc kubenswrapper[4848]: I0128 14:24:42.539228 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xqwt4"
Jan 28 14:24:42 crc kubenswrapper[4848]: W0128 14:24:42.579982 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c26a9af_3b47_4555_8cc9_1ce23eee9915.slice/crio-82c5e79d2668aa157be20b6db2c823340dd4cebbe7943743986b876a9cac87f7 WatchSource:0}: Error finding container 82c5e79d2668aa157be20b6db2c823340dd4cebbe7943743986b876a9cac87f7: Status 404 returned error can't find the container with id 82c5e79d2668aa157be20b6db2c823340dd4cebbe7943743986b876a9cac87f7
Jan 28 14:24:43 crc kubenswrapper[4848]: I0128 14:24:43.508048 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-xqwt4" event={"ID":"1c26a9af-3b47-4555-8cc9-1ce23eee9915","Type":"ContainerStarted","Data":"82c5e79d2668aa157be20b6db2c823340dd4cebbe7943743986b876a9cac87f7"}
Jan 28 14:24:54 crc kubenswrapper[4848]: I0128 14:24:54.649460 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-xqwt4" event={"ID":"1c26a9af-3b47-4555-8cc9-1ce23eee9915","Type":"ContainerStarted","Data":"b876c30536bc9d3f55f1adcd299e0056dace96ab120b9c9bb01e513a5324f1b4"}
Jan 28 14:24:54 crc kubenswrapper[4848]: I0128 14:24:54.673040 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-29w6z/crc-debug-xqwt4" podStartSLOduration=0.995736738 podStartE2EDuration="12.673017196s" podCreationTimestamp="2026-01-28 14:24:42 +0000 UTC" firstStartedPulling="2026-01-28 14:24:42.582295293 +0000 UTC m=+5909.494512321" lastFinishedPulling="2026-01-28 14:24:54.259575741 +0000 UTC m=+5921.171792779" observedRunningTime="2026-01-28 14:24:54.666896089 +0000 UTC m=+5921.579113127" watchObservedRunningTime="2026-01-28 14:24:54.673017196 +0000 UTC m=+5921.585234234"
Jan 28 14:25:37 crc kubenswrapper[4848]: I0128 14:25:37.924980 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 14:25:37 crc kubenswrapper[4848]: I0128 14:25:37.926954 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 14:25:50 crc kubenswrapper[4848]: I0128 14:25:50.321105 4848 generic.go:334] "Generic (PLEG): container finished" podID="1c26a9af-3b47-4555-8cc9-1ce23eee9915" containerID="b876c30536bc9d3f55f1adcd299e0056dace96ab120b9c9bb01e513a5324f1b4" exitCode=0
Jan 28 14:25:50 crc kubenswrapper[4848]: I0128 14:25:50.321835 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-xqwt4" event={"ID":"1c26a9af-3b47-4555-8cc9-1ce23eee9915","Type":"ContainerDied","Data":"b876c30536bc9d3f55f1adcd299e0056dace96ab120b9c9bb01e513a5324f1b4"}
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.501577 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xqwt4"
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.549131 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-29w6z/crc-debug-xqwt4"]
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.560511 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-29w6z/crc-debug-xqwt4"]
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.635867 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qc8l\" (UniqueName: \"kubernetes.io/projected/1c26a9af-3b47-4555-8cc9-1ce23eee9915-kube-api-access-9qc8l\") pod \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") "
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.636051 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c26a9af-3b47-4555-8cc9-1ce23eee9915-host\") pod \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\" (UID: \"1c26a9af-3b47-4555-8cc9-1ce23eee9915\") "
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.636931 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1c26a9af-3b47-4555-8cc9-1ce23eee9915-host" (OuterVolumeSpecName: "host") pod "1c26a9af-3b47-4555-8cc9-1ce23eee9915" (UID: "1c26a9af-3b47-4555-8cc9-1ce23eee9915"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.643483 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c26a9af-3b47-4555-8cc9-1ce23eee9915-kube-api-access-9qc8l" (OuterVolumeSpecName: "kube-api-access-9qc8l") pod "1c26a9af-3b47-4555-8cc9-1ce23eee9915" (UID: "1c26a9af-3b47-4555-8cc9-1ce23eee9915"). InnerVolumeSpecName "kube-api-access-9qc8l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.739578 4848 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c26a9af-3b47-4555-8cc9-1ce23eee9915-host\") on node \"crc\" DevicePath \"\""
Jan 28 14:25:51 crc kubenswrapper[4848]: I0128 14:25:51.739645 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qc8l\" (UniqueName: \"kubernetes.io/projected/1c26a9af-3b47-4555-8cc9-1ce23eee9915-kube-api-access-9qc8l\") on node \"crc\" DevicePath \"\""
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.353727 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82c5e79d2668aa157be20b6db2c823340dd4cebbe7943743986b876a9cac87f7"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.354005 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xqwt4"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.833007 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-29w6z/crc-debug-hs9pk"]
Jan 28 14:25:52 crc kubenswrapper[4848]: E0128 14:25:52.833725 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c26a9af-3b47-4555-8cc9-1ce23eee9915" containerName="container-00"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.833746 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c26a9af-3b47-4555-8cc9-1ce23eee9915" containerName="container-00"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.834055 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c26a9af-3b47-4555-8cc9-1ce23eee9915" containerName="container-00"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.835136 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.866330 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c26a9af-3b47-4555-8cc9-1ce23eee9915" path="/var/lib/kubelet/pods/1c26a9af-3b47-4555-8cc9-1ce23eee9915/volumes"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.972158 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5438a384-b3bf-475a-8d85-ef5565a46c7c-host\") pod \"crc-debug-hs9pk\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") " pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:52 crc kubenswrapper[4848]: I0128 14:25:52.972374 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhnb8\" (UniqueName: \"kubernetes.io/projected/5438a384-b3bf-475a-8d85-ef5565a46c7c-kube-api-access-dhnb8\") pod \"crc-debug-hs9pk\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") " pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:53 crc kubenswrapper[4848]: I0128 14:25:53.075793 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5438a384-b3bf-475a-8d85-ef5565a46c7c-host\") pod \"crc-debug-hs9pk\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") " pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:53 crc kubenswrapper[4848]: I0128 14:25:53.075897 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhnb8\" (UniqueName: \"kubernetes.io/projected/5438a384-b3bf-475a-8d85-ef5565a46c7c-kube-api-access-dhnb8\") pod \"crc-debug-hs9pk\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") " pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:53 crc kubenswrapper[4848]: I0128 14:25:53.076356 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5438a384-b3bf-475a-8d85-ef5565a46c7c-host\") pod \"crc-debug-hs9pk\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") " pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:53 crc kubenswrapper[4848]: I0128 14:25:53.096835 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhnb8\" (UniqueName: \"kubernetes.io/projected/5438a384-b3bf-475a-8d85-ef5565a46c7c-kube-api-access-dhnb8\") pod \"crc-debug-hs9pk\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") " pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:53 crc kubenswrapper[4848]: I0128 14:25:53.172075 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:53 crc kubenswrapper[4848]: I0128 14:25:53.375977 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-hs9pk" event={"ID":"5438a384-b3bf-475a-8d85-ef5565a46c7c","Type":"ContainerStarted","Data":"c2f0b0bd5f2dd1479ae2ce5fc59e00dfe51e6646361db7bade7a5561c3fe169a"}
Jan 28 14:25:54 crc kubenswrapper[4848]: I0128 14:25:54.389518 4848 generic.go:334] "Generic (PLEG): container finished" podID="5438a384-b3bf-475a-8d85-ef5565a46c7c" containerID="f39eb12bb9b1a4002d2c0fe8c03f58a6437adc226c7d2dd6d2536a771183b769" exitCode=0
Jan 28 14:25:54 crc kubenswrapper[4848]: I0128 14:25:54.389609 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-hs9pk" event={"ID":"5438a384-b3bf-475a-8d85-ef5565a46c7c","Type":"ContainerDied","Data":"f39eb12bb9b1a4002d2c0fe8c03f58a6437adc226c7d2dd6d2536a771183b769"}
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.542424 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.646360 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5438a384-b3bf-475a-8d85-ef5565a46c7c-host\") pod \"5438a384-b3bf-475a-8d85-ef5565a46c7c\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") "
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.646770 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhnb8\" (UniqueName: \"kubernetes.io/projected/5438a384-b3bf-475a-8d85-ef5565a46c7c-kube-api-access-dhnb8\") pod \"5438a384-b3bf-475a-8d85-ef5565a46c7c\" (UID: \"5438a384-b3bf-475a-8d85-ef5565a46c7c\") "
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.646768 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5438a384-b3bf-475a-8d85-ef5565a46c7c-host" (OuterVolumeSpecName: "host") pod "5438a384-b3bf-475a-8d85-ef5565a46c7c" (UID: "5438a384-b3bf-475a-8d85-ef5565a46c7c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.673208 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5438a384-b3bf-475a-8d85-ef5565a46c7c-kube-api-access-dhnb8" (OuterVolumeSpecName: "kube-api-access-dhnb8") pod "5438a384-b3bf-475a-8d85-ef5565a46c7c" (UID: "5438a384-b3bf-475a-8d85-ef5565a46c7c"). InnerVolumeSpecName "kube-api-access-dhnb8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.749725 4848 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5438a384-b3bf-475a-8d85-ef5565a46c7c-host\") on node \"crc\" DevicePath \"\""
Jan 28 14:25:55 crc kubenswrapper[4848]: I0128 14:25:55.749760 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhnb8\" (UniqueName: \"kubernetes.io/projected/5438a384-b3bf-475a-8d85-ef5565a46c7c-kube-api-access-dhnb8\") on node \"crc\" DevicePath \"\""
Jan 28 14:25:56 crc kubenswrapper[4848]: I0128 14:25:56.416728 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-hs9pk" event={"ID":"5438a384-b3bf-475a-8d85-ef5565a46c7c","Type":"ContainerDied","Data":"c2f0b0bd5f2dd1479ae2ce5fc59e00dfe51e6646361db7bade7a5561c3fe169a"}
Jan 28 14:25:56 crc kubenswrapper[4848]: I0128 14:25:56.417078 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2f0b0bd5f2dd1479ae2ce5fc59e00dfe51e6646361db7bade7a5561c3fe169a"
Jan 28 14:25:56 crc kubenswrapper[4848]: I0128 14:25:56.417273 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-hs9pk"
Jan 28 14:25:56 crc kubenswrapper[4848]: I0128 14:25:56.758847 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-29w6z/crc-debug-hs9pk"]
Jan 28 14:25:56 crc kubenswrapper[4848]: I0128 14:25:56.778528 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-29w6z/crc-debug-hs9pk"]
Jan 28 14:25:56 crc kubenswrapper[4848]: I0128 14:25:56.863807 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5438a384-b3bf-475a-8d85-ef5565a46c7c" path="/var/lib/kubelet/pods/5438a384-b3bf-475a-8d85-ef5565a46c7c/volumes"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.005894 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-29w6z/crc-debug-xkmd8"]
Jan 28 14:25:58 crc kubenswrapper[4848]: E0128 14:25:58.007374 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5438a384-b3bf-475a-8d85-ef5565a46c7c" containerName="container-00"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.007456 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5438a384-b3bf-475a-8d85-ef5565a46c7c" containerName="container-00"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.007753 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5438a384-b3bf-475a-8d85-ef5565a46c7c" containerName="container-00"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.012960 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.111999 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9t8t\" (UniqueName: \"kubernetes.io/projected/aac2d42c-081f-4435-8818-d520b2495521-kube-api-access-x9t8t\") pod \"crc-debug-xkmd8\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") " pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.112130 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aac2d42c-081f-4435-8818-d520b2495521-host\") pod \"crc-debug-xkmd8\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") " pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.213972 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aac2d42c-081f-4435-8818-d520b2495521-host\") pod \"crc-debug-xkmd8\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") " pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.214207 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aac2d42c-081f-4435-8818-d520b2495521-host\") pod \"crc-debug-xkmd8\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") " pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.214566 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9t8t\" (UniqueName: \"kubernetes.io/projected/aac2d42c-081f-4435-8818-d520b2495521-kube-api-access-x9t8t\") pod \"crc-debug-xkmd8\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") " pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.237957 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9t8t\" (UniqueName: \"kubernetes.io/projected/aac2d42c-081f-4435-8818-d520b2495521-kube-api-access-x9t8t\") pod \"crc-debug-xkmd8\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") " pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.336072 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:25:58 crc kubenswrapper[4848]: W0128 14:25:58.389585 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaac2d42c_081f_4435_8818_d520b2495521.slice/crio-cfba9c7283679a44caea2674d858e5f1bdf12b1e8e67af3b68d478818edca478 WatchSource:0}: Error finding container cfba9c7283679a44caea2674d858e5f1bdf12b1e8e67af3b68d478818edca478: Status 404 returned error can't find the container with id cfba9c7283679a44caea2674d858e5f1bdf12b1e8e67af3b68d478818edca478
Jan 28 14:25:58 crc kubenswrapper[4848]: I0128 14:25:58.472889 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-xkmd8" event={"ID":"aac2d42c-081f-4435-8818-d520b2495521","Type":"ContainerStarted","Data":"cfba9c7283679a44caea2674d858e5f1bdf12b1e8e67af3b68d478818edca478"}
Jan 28 14:25:59 crc kubenswrapper[4848]: I0128 14:25:59.484925 4848 generic.go:334] "Generic (PLEG): container finished" podID="aac2d42c-081f-4435-8818-d520b2495521" containerID="0be1d086820fc930834d5bc0d02b0979c8b2a28eac1a2ae697e474df492d41b8" exitCode=0
Jan 28 14:25:59 crc kubenswrapper[4848]: I0128 14:25:59.485147 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/crc-debug-xkmd8" event={"ID":"aac2d42c-081f-4435-8818-d520b2495521","Type":"ContainerDied","Data":"0be1d086820fc930834d5bc0d02b0979c8b2a28eac1a2ae697e474df492d41b8"}
Jan 28 14:25:59 crc kubenswrapper[4848]: I0128 14:25:59.533664 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-29w6z/crc-debug-xkmd8"]
Jan 28 14:25:59 crc kubenswrapper[4848]: I0128 14:25:59.544461 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-29w6z/crc-debug-xkmd8"]
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.616822 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.677162 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9t8t\" (UniqueName: \"kubernetes.io/projected/aac2d42c-081f-4435-8818-d520b2495521-kube-api-access-x9t8t\") pod \"aac2d42c-081f-4435-8818-d520b2495521\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") "
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.677575 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aac2d42c-081f-4435-8818-d520b2495521-host\") pod \"aac2d42c-081f-4435-8818-d520b2495521\" (UID: \"aac2d42c-081f-4435-8818-d520b2495521\") "
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.677863 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aac2d42c-081f-4435-8818-d520b2495521-host" (OuterVolumeSpecName: "host") pod "aac2d42c-081f-4435-8818-d520b2495521" (UID: "aac2d42c-081f-4435-8818-d520b2495521"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.678558 4848 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aac2d42c-081f-4435-8818-d520b2495521-host\") on node \"crc\" DevicePath \"\""
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.692478 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aac2d42c-081f-4435-8818-d520b2495521-kube-api-access-x9t8t" (OuterVolumeSpecName: "kube-api-access-x9t8t") pod "aac2d42c-081f-4435-8818-d520b2495521" (UID: "aac2d42c-081f-4435-8818-d520b2495521"). InnerVolumeSpecName "kube-api-access-x9t8t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.781700 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9t8t\" (UniqueName: \"kubernetes.io/projected/aac2d42c-081f-4435-8818-d520b2495521-kube-api-access-x9t8t\") on node \"crc\" DevicePath \"\""
Jan 28 14:26:00 crc kubenswrapper[4848]: I0128 14:26:00.869315 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aac2d42c-081f-4435-8818-d520b2495521" path="/var/lib/kubelet/pods/aac2d42c-081f-4435-8818-d520b2495521/volumes"
Jan 28 14:26:01 crc kubenswrapper[4848]: I0128 14:26:01.512526 4848 scope.go:117] "RemoveContainer" containerID="0be1d086820fc930834d5bc0d02b0979c8b2a28eac1a2ae697e474df492d41b8"
Jan 28 14:26:01 crc kubenswrapper[4848]: I0128 14:26:01.512584 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/crc-debug-xkmd8"
Jan 28 14:26:07 crc kubenswrapper[4848]: I0128 14:26:07.924182 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 14:26:07 crc kubenswrapper[4848]: I0128 14:26:07.924993 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 14:26:29 crc kubenswrapper[4848]: I0128 14:26:29.330635 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-669cc887b-rnh7b_feba4e43-dc8d-455e-a760-82f68f781511/barbican-api/0.log"
Jan 28 14:26:29 crc kubenswrapper[4848]: I0128 14:26:29.541108 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-669cc887b-rnh7b_feba4e43-dc8d-455e-a760-82f68f781511/barbican-api-log/0.log"
Jan 28 14:26:29 crc kubenswrapper[4848]: I0128 14:26:29.722530 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5cb69d9f6b-f9ck5_7a72021f-6e14-4681-b127-7c85be7c597c/barbican-keystone-listener/0.log"
Jan 28 14:26:29 crc kubenswrapper[4848]: I0128 14:26:29.737652 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5cb69d9f6b-f9ck5_7a72021f-6e14-4681-b127-7c85be7c597c/barbican-keystone-listener-log/0.log"
Jan 28 14:26:29 crc kubenswrapper[4848]: I0128 14:26:29.946168 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bd96c8879-gdtwm_a56e7c4e-4ce2-4742-8645-6201f8c957f7/barbican-worker/0.log"
Jan 28 14:26:29 crc kubenswrapper[4848]: I0128 14:26:29.965635 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bd96c8879-gdtwm_a56e7c4e-4ce2-4742-8645-6201f8c957f7/barbican-worker-log/0.log"
Jan 28 14:26:30 crc kubenswrapper[4848]: I0128 14:26:30.181504 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9_64b9b93d-fe00-440a-88b0-dbb5f4621be9/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:30 crc kubenswrapper[4848]: I0128 14:26:30.424475 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/ceilometer-central-agent/0.log"
Jan 28 14:26:30 crc kubenswrapper[4848]: I0128 14:26:30.433410 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/ceilometer-notification-agent/0.log"
Jan 28 14:26:30 crc kubenswrapper[4848]: I0128 14:26:30.466403 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/proxy-httpd/0.log"
Jan 28 14:26:30 crc kubenswrapper[4848]: I0128 14:26:30.611144 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/sg-core/0.log"
Jan 28 14:26:30 crc kubenswrapper[4848]: I0128 14:26:30.747284 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0a69cc57-5cf8-4b44-a956-5641d66512fa/cinder-api-log/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.123389 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_b4edacab-a671-4ace-8bb5-bd113d2c666b/probe/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.203482 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0a69cc57-5cf8-4b44-a956-5641d66512fa/cinder-api/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.405460 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_04fa376f-7bc1-48da-870a-e8bb086f0263/cinder-scheduler/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.414718 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_b4edacab-a671-4ace-8bb5-bd113d2c666b/cinder-backup/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.521839 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_04fa376f-7bc1-48da-870a-e8bb086f0263/probe/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.739812 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_f434c780-9c6b-4fa2-b5a2-0220b134bb73/probe/0.log"
Jan 28 14:26:31 crc kubenswrapper[4848]: I0128 14:26:31.847775 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_f434c780-9c6b-4fa2-b5a2-0220b134bb73/cinder-volume/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.008020 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_d004b545-6c1d-42f8-93cb-be2549026492/probe/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.149668 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_d004b545-6c1d-42f8-93cb-be2549026492/cinder-volume/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.154795 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc_354c2496-37a2-4d9c-9439-42b042ca2639/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.382842 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55b94cdbb7-56ttn_91425abd-325a-48c8-9c49-34b409614808/init/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.419381 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c_dee48f15-f76a-4039-b7a1-85c61a4d2ed3/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.673424 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55b94cdbb7-56ttn_91425abd-325a-48c8-9c49-34b409614808/init/0.log"
Jan 28 14:26:32 crc kubenswrapper[4848]: I0128 14:26:32.766183 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6_5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.056801 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55b94cdbb7-56ttn_91425abd-325a-48c8-9c49-34b409614808/dnsmasq-dns/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.226329 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0ab62279-8f3a-4ad3-8de4-84c72ad421a1/glance-log/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.240931 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0ab62279-8f3a-4ad3-8de4-84c72ad421a1/glance-httpd/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.335861 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_03e938c1-a61a-4c60-9d8e-660cefebc2fc/glance-httpd/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.469361 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_03e938c1-a61a-4c60-9d8e-660cefebc2fc/glance-log/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.646379 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68f5655b9d-76qsp_dfa56dc1-1635-454c-95e0-74fdedcf8b00/horizon/0.log"
Jan 28 14:26:33 crc kubenswrapper[4848]: I0128 14:26:33.970689 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr_4acf7592-041f-43a4-b85b-a2fac8dbdc3c/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:34 crc kubenswrapper[4848]: I0128 14:26:34.230090 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29493481-q89gd_7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502/keystone-cron/0.log"
Jan 28 14:26:34 crc kubenswrapper[4848]: I0128 14:26:34.232753 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68f5655b9d-76qsp_dfa56dc1-1635-454c-95e0-74fdedcf8b00/horizon-log/0.log"
Jan 28 14:26:34 crc kubenswrapper[4848]: I0128 14:26:34.239113 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-zjdcb_63a4d58d-3a42-4ddc-b735-af5e71c2ffd3/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:34 crc kubenswrapper[4848]: I0128 14:26:34.529190 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_791ef386-40ae-4395-aa5d-b86f13307c6c/kube-state-metrics/0.log"
Jan 28 14:26:34 crc kubenswrapper[4848]: I0128 14:26:34.862751 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-thzjf_d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:34 crc kubenswrapper[4848]: I0128 14:26:34.998968 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-d454d7fbb-hth9j_e364a091-9a40-455c-b2dc-fd9a5d51181a/keystone-api/0.log"
Jan 28 14:26:35 crc kubenswrapper[4848]: I0128 14:26:35.350057 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p_d991fbd4-087c-475f-99cb-ccfab86bda67/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:35 crc kubenswrapper[4848]: I0128 14:26:35.507177 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6997cd7cdf-nf254_ac7966e3-99c4-4e7c-b2d6-7229c78ca5db/neutron-httpd/0.log"
Jan 28 14:26:35 crc kubenswrapper[4848]: I0128 14:26:35.614569 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6997cd7cdf-nf254_ac7966e3-99c4-4e7c-b2d6-7229c78ca5db/neutron-api/0.log"
Jan 28 14:26:36 crc kubenswrapper[4848]: I0128 14:26:36.210720 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_84390cac-21ce-4f4f-98f8-a8371c1742cb/nova-cell0-conductor-conductor/0.log"
Jan 28 14:26:36 crc kubenswrapper[4848]: I0128 14:26:36.579589 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_df8d8482-966f-4f20-836e-09bef423d150/nova-cell1-conductor-conductor/0.log"
Jan 28 14:26:36 crc kubenswrapper[4848]: I0128 14:26:36.966208 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_a524b9a4-fe08-4675-b873-030d31d75a28/nova-cell1-novncproxy-novncproxy/0.log"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.207090 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-rhf8n_15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.464983 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_68a7a2b4-9e0a-410e-b131-6bf39b7ffa35/nova-api-log/0.log"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.636601 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_6c953264-454e-4949-906c-25378e467ab4/nova-metadata-log/0.log"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.924232 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.924336 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.924403 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.925885 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 14:26:37 crc kubenswrapper[4848]: I0128 14:26:37.925976 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" gracePeriod=600
Jan 28 14:26:38 crc kubenswrapper[4848]: E0128 14:26:38.052395 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.067084 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_68a7a2b4-9e0a-410e-b131-6bf39b7ffa35/nova-api-api/0.log"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.253582 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_725cd16a-296a-485a-9d15-df106a2c6ebc/nova-scheduler-scheduler/0.log"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.259662 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3face43f-5a30-4c86-b004-3a98bb508b55/mysql-bootstrap/0.log"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.504470 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3face43f-5a30-4c86-b004-3a98bb508b55/mysql-bootstrap/0.log"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.540321 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3face43f-5a30-4c86-b004-3a98bb508b55/galera/0.log"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.821351 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ee209e0b-96f8-46ef-b1ff-2fac23c03ecc/mysql-bootstrap/0.log"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.982367 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" exitCode=0
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.982428 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"}
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.982476 4848 scope.go:117] "RemoveContainer" containerID="85ce7578267f96fcfe0593811bd362f1bfede91461e853c955b4ba8d74952d50"
Jan 28 14:26:38 crc kubenswrapper[4848]: I0128 14:26:38.983382 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:26:38 crc kubenswrapper[4848]: E0128 14:26:38.983709 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:26:39 crc kubenswrapper[4848]: I0128 14:26:39.004954 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ee209e0b-96f8-46ef-b1ff-2fac23c03ecc/galera/0.log"
Jan 28 14:26:39 crc kubenswrapper[4848]: I0128 14:26:39.051923 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ee209e0b-96f8-46ef-b1ff-2fac23c03ecc/mysql-bootstrap/0.log"
Jan 28 14:26:39 crc kubenswrapper[4848]: I0128 14:26:39.299126 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_841fc796-225e-424f-bd6c-d3d43c9814d4/openstackclient/0.log"
Jan 28 14:26:39 crc kubenswrapper[4848]: I0128 14:26:39.426119 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wk5zd_55d9487c-8ef4-4859-b3ca-6bd679cb1854/openstack-network-exporter/0.log"
Jan 28 14:26:39 crc kubenswrapper[4848]: I0128 14:26:39.691863 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovsdb-server-init/0.log"
Jan 28 14:26:39 crc kubenswrapper[4848]: I0128 14:26:39.924823 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovsdb-server-init/0.log"
Jan 28 14:26:40 crc kubenswrapper[4848]: I0128 14:26:40.077290 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovsdb-server/0.log"
Jan 28 14:26:40 crc kubenswrapper[4848]: I0128 14:26:40.466572 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-p6z9h_77e3e961-2cae-4bee-b73a-40336940b35c/ovn-controller/0.log"
Jan 28 14:26:40 crc kubenswrapper[4848]: I0128 14:26:40.552869 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_6c953264-454e-4949-906c-25378e467ab4/nova-metadata-metadata/0.log"
Jan 28 14:26:40 crc kubenswrapper[4848]: I0128 14:26:40.580394 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovs-vswitchd/0.log"
Jan 28 14:26:40 crc kubenswrapper[4848]: I0128 14:26:40.855518 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-jv56f_ebc674d6-8c77-4481-b022-c91d7c77ec6e/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:40 crc kubenswrapper[4848]: I0128 14:26:40.893172 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_31b7f744-13ea-445d-99a0-57155c52e332/openstack-network-exporter/0.log"
Jan 28 14:26:41 crc kubenswrapper[4848]: I0128 14:26:41.037898 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_31b7f744-13ea-445d-99a0-57155c52e332/ovn-northd/0.log"
Jan 28 14:26:41 crc kubenswrapper[4848]: I0128 14:26:41.248977 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_38cd06a1-9204-4a3f-bb28-9227a8023af9/openstack-network-exporter/0.log"
Jan 28 14:26:41 crc kubenswrapper[4848]: I0128 14:26:41.265971 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_38cd06a1-9204-4a3f-bb28-9227a8023af9/ovsdbserver-nb/0.log"
Jan 28 14:26:41 crc kubenswrapper[4848]: I0128 14:26:41.887959 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_59144d8e-c7a9-442f-bcc3-585322a77a97/openstack-network-exporter/0.log"
Jan 28 14:26:41 crc kubenswrapper[4848]: I0128 14:26:41.938017 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_59144d8e-c7a9-442f-bcc3-585322a77a97/ovsdbserver-sb/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.205705 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-648cdddfd-q5sbd_8e4ac2f3-a03f-4338-8cd3-188dc4829ea9/placement-api/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.387038 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/init-config-reloader/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.448586 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-648cdddfd-q5sbd_8e4ac2f3-a03f-4338-8cd3-188dc4829ea9/placement-log/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.604902 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/config-reloader/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.667596 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/init-config-reloader/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.673739 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/prometheus/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.721308 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/thanos-sidecar/0.log"
Jan 28 14:26:42 crc kubenswrapper[4848]: I0128 14:26:42.934223 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2255ce73-5019-4b86-b15b-1e390099af55/setup-container/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.198768 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2255ce73-5019-4b86-b15b-1e390099af55/setup-container/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.269809 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_ff062566-cfd3-4393-b794-695d3473ef1a/setup-container/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.291404 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2255ce73-5019-4b86-b15b-1e390099af55/rabbitmq/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.595918 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_ff062566-cfd3-4393-b794-695d3473ef1a/setup-container/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.709650 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_ff062566-cfd3-4393-b794-695d3473ef1a/rabbitmq/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.715293 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36728af2-3caa-4d67-bec1-ed4b2d26547c/setup-container/0.log"
Jan 28 14:26:43 crc kubenswrapper[4848]: I0128 14:26:43.970129 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36728af2-3caa-4d67-bec1-ed4b2d26547c/setup-container/0.log"
Jan 28 14:26:44 crc kubenswrapper[4848]: I0128 14:26:44.005404 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36728af2-3caa-4d67-bec1-ed4b2d26547c/rabbitmq/0.log"
Jan 28 14:26:44 crc kubenswrapper[4848]: I0128 14:26:44.155287 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm_0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:44 crc kubenswrapper[4848]: I0128 14:26:44.293314 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-w9xdz_c1cb683f-398f-4145-aa62-96ecbb02e82d/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:44 crc kubenswrapper[4848]: I0128 14:26:44.446905 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9_0c2e6d21-25c3-4653-bd87-18f42e3a68a5/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:44 crc kubenswrapper[4848]: I0128 14:26:44.604222 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-ntmv9_e8f81366-a592-4a64-b4e7-7d036d232b6b/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:44 crc kubenswrapper[4848]: I0128 14:26:44.736441 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-rzhsb_548fac9b-bd05-42b8-8c88-7c9de08ae4b2/ssh-known-hosts-edpm-deployment/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.124164 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77cbfc9c5c-vjds6_8c80f3cf-4e08-4748-95eb-400461e61399/proxy-server/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.200584 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77cbfc9c5c-vjds6_8c80f3cf-4e08-4748-95eb-400461e61399/proxy-httpd/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.555624 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-mgbt4_e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d/swift-ring-rebalance/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.632991 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-auditor/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.837792 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-reaper/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.896814 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-replicator/0.log"
Jan 28 14:26:45 crc kubenswrapper[4848]: I0128 14:26:45.953900 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-server/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.107394 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-auditor/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.153755 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-replicator/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.183283 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-server/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.214180 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-updater/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.427921 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-expirer/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.483031 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-auditor/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.519225 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-server/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.527219 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-replicator/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.704902 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-updater/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.725512 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/rsync/0.log"
Jan 28 14:26:46 crc kubenswrapper[4848]: I0128 14:26:46.865088 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/swift-recon-cron/0.log"
Jan 28 14:26:47 crc kubenswrapper[4848]: I0128 14:26:47.077855 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-hkctn_42d08409-a571-40ac-968e-7ac9a5280841/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:47 crc kubenswrapper[4848]: I0128 14:26:47.170036 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_08f6c3e6-eb26-471d-947f-11cb5533c6c8/tempest-tests-tempest-tests-runner/0.log"
Jan 28 14:26:47 crc kubenswrapper[4848]: I0128 14:26:47.352796 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_f8682e28-9944-4b82-b3d0-f6e6eca96b93/test-operator-logs-container/0.log"
Jan 28 14:26:47 crc kubenswrapper[4848]: I0128 14:26:47.453179 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-67bz6_c4b08279-fe00-4688-8202-88df5280da09/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 28 14:26:47 crc kubenswrapper[4848]: I0128 14:26:47.709254 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_7e5a41be-973a-4b25-991f-ccbdef21b343/memcached/0.log"
Jan 28 14:26:48 crc kubenswrapper[4848]: I0128 14:26:48.603643 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_b7811364-7959-428c-8be5-751c4b25f597/watcher-applier/0.log"
Jan 28 14:26:49 crc kubenswrapper[4848]: I0128 14:26:49.267316 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_2c33d357-d7c0-4239-a58e-d882b915fafb/watcher-api-log/0.log"
Jan 28 14:26:51 crc kubenswrapper[4848]: I0128 14:26:51.851976 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:26:51 crc kubenswrapper[4848]: E0128 14:26:51.853223 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:26:52 crc kubenswrapper[4848]: I0128 14:26:52.096077 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_c4b63577-cac1-4fce-bdca-c0b5a5d6c646/watcher-decision-engine/0.log"
Jan 28 14:26:53 crc kubenswrapper[4848]: I0128 14:26:53.202162 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_2c33d357-d7c0-4239-a58e-d882b915fafb/watcher-api/0.log"
Jan 28 14:27:04 crc kubenswrapper[4848]: I0128 14:27:04.857010 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:27:04 crc kubenswrapper[4848]: E0128 14:27:04.858241 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:27:18 crc kubenswrapper[4848]: I0128 14:27:18.850769 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:27:18 crc kubenswrapper[4848]: E0128 14:27:18.851907 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.435269 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/util/0.log"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.675424 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/pull/0.log"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.692896 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/util/0.log"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.747113 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/pull/0.log"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.875545 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/util/0.log"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.947749 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/extract/0.log"
Jan 28 14:27:23 crc kubenswrapper[4848]: I0128 14:27:23.973398 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/pull/0.log"
Jan 28 14:27:24 crc kubenswrapper[4848]: I0128 14:27:24.167489 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7f86f8796f-dj8qm_f41ee80c-1ab9-4786-8fec-d7b3a12d545b/manager/0.log"
Jan 28 14:27:24 crc kubenswrapper[4848]: I0128 14:27:24.315438 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7478f7dbf9-cjt92_4747f67c-5dd8-415a-8ff5-c6b43e1142cf/manager/0.log"
Jan 28 14:27:24 crc kubenswrapper[4848]: I0128 14:27:24.483206 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-9nvdh_b29a79e7-07da-4c52-9798-e279092c28df/manager/0.log"
Jan 28 14:27:24 crc kubenswrapper[4848]: I0128 14:27:24.637474 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-tx7mn_d20ac3bf-9cba-4074-962c-7ad7d7b17174/manager/0.log"
Jan 28 14:27:24 crc kubenswrapper[4848]: I0128 14:27:24.770453 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-9jqlp_92cbecbc-09b7-4aa7-8511-dcc241d6b957/manager/0.log"
Jan 28 14:27:24 crc kubenswrapper[4848]: I0128 14:27:24.953050 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-g54sg_e535d212-7524-4da1-9905-87af2259c702/manager/0.log"
Jan 28 14:27:25 crc kubenswrapper[4848]: I0128 14:27:25.388716 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-694cf4f878-gcj9g_fe2e05c6-72db-4981-8b56-dc2a620003f2/manager/0.log"
Jan 28 14:27:25 crc kubenswrapper[4848]: I0128 14:27:25.609201 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-598f7747c9-hfnz7_0a7152e1-cedd-465b-a186-9a241ca98141/manager/0.log"
Jan 28 14:27:25 crc kubenswrapper[4848]: I0128 14:27:25.806670 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-xb97k_82ac0cb8-c28c-4242-8aa5-817aaf35ea3e/manager/0.log"
Jan 28 14:27:25 crc kubenswrapper[4848]: I0128 14:27:25.866339 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-v6mn8_dedfeb84-9e8b-46f8-ac8f-0c5a85380160/manager/0.log"
Jan 28 14:27:26 crc kubenswrapper[4848]: I0128 14:27:26.083427 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f_39a4178e-2251-4cc9-bc57-2b46a5902a3d/manager/0.log"
Jan 28 14:27:26 crc kubenswrapper[4848]: I0128 14:27:26.171157 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78d58447c5-qpthc_365e9359-c6e2-428c-8889-95a232bb3e34/manager/0.log"
Jan 28 14:27:26 crc kubenswrapper[4848]: I0128 14:27:26.410968 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-7bdb645866-jwvlh_34fd263e-f69d-4cc8-a003-ccb6f12273a6/manager/0.log"
Jan 28 14:27:26 crc kubenswrapper[4848]: I0128 14:27:26.443031 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-5f4cd88d46-mckcj_8f0ab1f6-45a7-4731-b418-f9131c97217a/manager/0.log"
Jan 28 14:27:26 crc kubenswrapper[4848]: I0128 14:27:26.659498 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6_390dea01-5c38-4c87-98c2-32f655af4a62/manager/0.log"
Jan 28 14:27:26 crc kubenswrapper[4848]: I0128 14:27:26.856241 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7db44d5f8c-t26mq_221cef79-cbf0-4a42-baca-872879406257/operator/0.log"
Jan 28 14:27:27 crc kubenswrapper[4848]: I0128 14:27:27.159723 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6jnp2_730f88b0-924e-4c06-868f-4baf83bc17a9/registry-server/0.log"
Jan 28 14:27:27 crc kubenswrapper[4848]: I0128 14:27:27.409621 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6f75f45d54-csw7g_2c9667bf-ec8d-4064-b52e-e5a0f55f09a3/manager/0.log"
Jan 28 14:27:27 crc kubenswrapper[4848]: I0128 14:27:27.502566 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-79d5ccc684-2g2qj_ee8c2e3c-2df5-43aa-b624-e82e4cff81fb/manager/0.log"
Jan 28 14:27:27 crc kubenswrapper[4848]: I0128 14:27:27.787500 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-8mp86_04f3b1d4-2f58-42d7-962c-d7a940b93469/operator/0.log"
Jan 28 14:27:28 crc kubenswrapper[4848]: I0128 14:27:28.096659 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-cxnsf_1dada58b-0b20-4d23-aa46-164beef54624/manager/0.log"
Jan 28 14:27:28 crc kubenswrapper[4848]: I0128 14:27:28.297514 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-s8mg8_164ef38a-92cd-4442-8925-509ba68366ba/manager/0.log"
Jan 28 14:27:28 crc kubenswrapper[4848]: I0128 14:27:28.339567 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6b67879f4f-c5rbp_ef39eedb-8ccb-47f4-af2c-faee2565e2c9/manager/0.log"
Jan 28 14:27:28 crc kubenswrapper[4848]: I0128 14:27:28.496120 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-5h8th_2801f0da-025c-46a4-a123-6e71c300b025/manager/0.log"
Jan 28 14:27:28 crc kubenswrapper[4848]: I0128 14:27:28.603948 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-59c5775db7-r2ppl_0593b76f-9225-457e-9c0f-186dc73f37a3/manager/0.log"
Jan 28 14:27:33 crc kubenswrapper[4848]: I0128 14:27:33.851146 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:27:33 crc kubenswrapper[4848]: E0128 14:27:33.853641 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.107771 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qp24n"]
Jan 28 14:27:37 crc kubenswrapper[4848]: E0128 14:27:37.108697 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aac2d42c-081f-4435-8818-d520b2495521" containerName="container-00"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.108714 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="aac2d42c-081f-4435-8818-d520b2495521" containerName="container-00"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.108930 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="aac2d42c-081f-4435-8818-d520b2495521" containerName="container-00"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.110469 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.120714 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qp24n"]
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.214485 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-utilities\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.215021 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccdcm\" (UniqueName: \"kubernetes.io/projected/47020c65-d88b-42a4-8d62-5de343319a37-kube-api-access-ccdcm\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.215103 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-catalog-content\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.328883 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-utilities\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.328963 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccdcm\" (UniqueName: \"kubernetes.io/projected/47020c65-d88b-42a4-8d62-5de343319a37-kube-api-access-ccdcm\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.329005 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-catalog-content\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.329702 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-catalog-content\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.329711 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-utilities\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n"
Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.371296 4848 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ccdcm\" (UniqueName: \"kubernetes.io/projected/47020c65-d88b-42a4-8d62-5de343319a37-kube-api-access-ccdcm\") pod \"certified-operators-qp24n\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:37 crc kubenswrapper[4848]: I0128 14:27:37.443740 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:38 crc kubenswrapper[4848]: I0128 14:27:38.201627 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qp24n"] Jan 28 14:27:38 crc kubenswrapper[4848]: I0128 14:27:38.694683 4848 generic.go:334] "Generic (PLEG): container finished" podID="47020c65-d88b-42a4-8d62-5de343319a37" containerID="19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69" exitCode=0 Jan 28 14:27:38 crc kubenswrapper[4848]: I0128 14:27:38.694784 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerDied","Data":"19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69"} Jan 28 14:27:38 crc kubenswrapper[4848]: I0128 14:27:38.694922 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerStarted","Data":"21edf5e1da6209ee43caf746e5b9d3b043f281a0ac91ad6e7d4c3b9d9027038d"} Jan 28 14:27:39 crc kubenswrapper[4848]: I0128 14:27:39.706131 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerStarted","Data":"1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d"} Jan 28 14:27:40 crc kubenswrapper[4848]: I0128 14:27:40.720899 4848 generic.go:334] "Generic (PLEG): container finished" podID="47020c65-d88b-42a4-8d62-5de343319a37" containerID="1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d" exitCode=0 Jan 28 14:27:40 crc kubenswrapper[4848]: I0128 14:27:40.721154 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerDied","Data":"1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d"} Jan 28 14:27:41 crc kubenswrapper[4848]: I0128 14:27:41.747402 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerStarted","Data":"95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d"} Jan 28 14:27:41 crc kubenswrapper[4848]: I0128 14:27:41.789497 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qp24n" podStartSLOduration=2.380582389 podStartE2EDuration="4.789469128s" podCreationTimestamp="2026-01-28 14:27:37 +0000 UTC" firstStartedPulling="2026-01-28 14:27:38.697829894 +0000 UTC m=+6085.610046932" lastFinishedPulling="2026-01-28 14:27:41.106716623 +0000 UTC m=+6088.018933671" observedRunningTime="2026-01-28 14:27:41.77598027 +0000 UTC m=+6088.688197308" watchObservedRunningTime="2026-01-28 14:27:41.789469128 +0000 UTC m=+6088.701686166" Jan 28 14:27:45 crc kubenswrapper[4848]: I0128 14:27:45.850290 4848 scope.go:117] "RemoveContainer" 
containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:27:45 crc kubenswrapper[4848]: E0128 14:27:45.851479 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:27:47 crc kubenswrapper[4848]: I0128 14:27:47.443743 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:47 crc kubenswrapper[4848]: I0128 14:27:47.445369 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:47 crc kubenswrapper[4848]: I0128 14:27:47.507104 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:47 crc kubenswrapper[4848]: I0128 14:27:47.858605 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:47 crc kubenswrapper[4848]: I0128 14:27:47.922709 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qp24n"] Jan 28 14:27:49 crc kubenswrapper[4848]: I0128 14:27:49.821316 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qp24n" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="registry-server" containerID="cri-o://95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d" gracePeriod=2 Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.385722 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.465133 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccdcm\" (UniqueName: \"kubernetes.io/projected/47020c65-d88b-42a4-8d62-5de343319a37-kube-api-access-ccdcm\") pod \"47020c65-d88b-42a4-8d62-5de343319a37\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.465697 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-utilities\") pod \"47020c65-d88b-42a4-8d62-5de343319a37\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.465738 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-catalog-content\") pod \"47020c65-d88b-42a4-8d62-5de343319a37\" (UID: \"47020c65-d88b-42a4-8d62-5de343319a37\") " Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.466714 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-utilities" (OuterVolumeSpecName: "utilities") pod "47020c65-d88b-42a4-8d62-5de343319a37" (UID: "47020c65-d88b-42a4-8d62-5de343319a37"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.496171 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47020c65-d88b-42a4-8d62-5de343319a37-kube-api-access-ccdcm" (OuterVolumeSpecName: "kube-api-access-ccdcm") pod "47020c65-d88b-42a4-8d62-5de343319a37" (UID: "47020c65-d88b-42a4-8d62-5de343319a37"). InnerVolumeSpecName "kube-api-access-ccdcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.525842 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47020c65-d88b-42a4-8d62-5de343319a37" (UID: "47020c65-d88b-42a4-8d62-5de343319a37"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.568168 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.568472 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47020c65-d88b-42a4-8d62-5de343319a37-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.568549 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccdcm\" (UniqueName: \"kubernetes.io/projected/47020c65-d88b-42a4-8d62-5de343319a37-kube-api-access-ccdcm\") on node \"crc\" DevicePath \"\"" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.839723 4848 generic.go:334] "Generic (PLEG): container finished" podID="47020c65-d88b-42a4-8d62-5de343319a37" containerID="95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d" exitCode=0 Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.839810 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerDied","Data":"95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d"} Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.839897 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qp24n" event={"ID":"47020c65-d88b-42a4-8d62-5de343319a37","Type":"ContainerDied","Data":"21edf5e1da6209ee43caf746e5b9d3b043f281a0ac91ad6e7d4c3b9d9027038d"} Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.839900 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qp24n" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.839924 4848 scope.go:117] "RemoveContainer" containerID="95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.900819 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qp24n"] Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.905508 4848 scope.go:117] "RemoveContainer" containerID="1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d" Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.922965 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qp24n"] Jan 28 14:27:50 crc kubenswrapper[4848]: I0128 14:27:50.962105 4848 scope.go:117] "RemoveContainer" containerID="19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69" Jan 28 14:27:51 crc kubenswrapper[4848]: I0128 14:27:51.001379 4848 scope.go:117] "RemoveContainer" containerID="95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d" Jan 28 14:27:51 crc kubenswrapper[4848]: E0128 14:27:51.002662 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d\": container with ID starting with 95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d not found: ID does not exist" containerID="95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d" Jan 28 14:27:51 crc kubenswrapper[4848]: I0128 14:27:51.002725 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d"} err="failed to get container status \"95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d\": rpc error: code = NotFound desc = could not find container \"95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d\": container with ID starting with 95e14a87e7fe63785ad569a3e71a0d7e5b73f68f1cb5591b97c1952ab40db80d not found: ID does not exist" Jan 28 14:27:51 crc kubenswrapper[4848]: I0128 14:27:51.002791 4848 scope.go:117] "RemoveContainer" containerID="1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d" Jan 28 14:27:51 crc kubenswrapper[4848]: E0128 14:27:51.003391 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d\": container with ID starting with 1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d not found: ID does not exist" containerID="1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d" Jan 28 14:27:51 crc kubenswrapper[4848]: I0128 14:27:51.003432 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d"} err="failed to get container status \"1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d\": rpc error: code = NotFound desc = could not find container \"1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d\": container with ID starting with 1b5c4c0b6754a0b5a58ca11c85ec629d48ec8116c07c894d8f308595f3c2e52d not found: ID does not exist" Jan 28 14:27:51 crc kubenswrapper[4848]: I0128 14:27:51.003459 4848 scope.go:117] "RemoveContainer" 
containerID="19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69" Jan 28 14:27:51 crc kubenswrapper[4848]: E0128 14:27:51.003803 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69\": container with ID starting with 19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69 not found: ID does not exist" containerID="19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69" Jan 28 14:27:51 crc kubenswrapper[4848]: I0128 14:27:51.003851 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69"} err="failed to get container status \"19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69\": rpc error: code = NotFound desc = could not find container \"19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69\": container with ID starting with 19e924ac1ee3a84f616750c72f36ed925d02578c07041ce60842b13e09b2ca69 not found: ID does not exist" Jan 28 14:27:52 crc kubenswrapper[4848]: I0128 14:27:52.874611 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47020c65-d88b-42a4-8d62-5de343319a37" path="/var/lib/kubelet/pods/47020c65-d88b-42a4-8d62-5de343319a37/volumes" Jan 28 14:27:54 crc kubenswrapper[4848]: I0128 14:27:54.541801 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-p4g9c_3daae941-7347-4673-8fef-20c2785a8cd6/control-plane-machine-set-operator/0.log" Jan 28 14:27:54 crc kubenswrapper[4848]: I0128 14:27:54.765100 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-zdq5h_4fa929eb-e746-4253-9cf6-dcb0939da532/kube-rbac-proxy/0.log" Jan 28 14:27:54 crc kubenswrapper[4848]: I0128 14:27:54.793846 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-zdq5h_4fa929eb-e746-4253-9cf6-dcb0939da532/machine-api-operator/0.log" Jan 28 14:27:59 crc kubenswrapper[4848]: I0128 14:27:59.850187 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:27:59 crc kubenswrapper[4848]: E0128 14:27:59.851124 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:28:10 crc kubenswrapper[4848]: I0128 14:28:10.344311 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-c7q52_59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8/cert-manager-controller/0.log" Jan 28 14:28:10 crc kubenswrapper[4848]: I0128 14:28:10.579950 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-cvf98_09794657-9406-4696-9df8-0f0d782604de/cert-manager-cainjector/0.log" Jan 28 14:28:10 crc kubenswrapper[4848]: I0128 14:28:10.697391 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-dwrr2_4661c13f-0355-4d7e-b7d9-5a3446bfcc17/cert-manager-webhook/0.log" Jan 28 14:28:14 crc kubenswrapper[4848]: I0128 14:28:14.861238 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:28:14 crc kubenswrapper[4848]: E0128 14:28:14.862520 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:28:27 crc kubenswrapper[4848]: I0128 14:28:27.526980 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-76xvb_ae62f49e-2ce4-4e48-b803-2b46a5319273/nmstate-console-plugin/0.log" Jan 28 14:28:27 crc kubenswrapper[4848]: I0128 14:28:27.716438 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-w28lf_88ba0124-029f-4b9c-8479-2ee4c089bcbb/nmstate-handler/0.log" Jan 28 14:28:27 crc kubenswrapper[4848]: I0128 14:28:27.830947 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-7gp2j_b67bb5ba-7747-475f-a3c5-de2b7df72934/kube-rbac-proxy/0.log" Jan 28 14:28:27 crc kubenswrapper[4848]: I0128 14:28:27.969568 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-7gp2j_b67bb5ba-7747-475f-a3c5-de2b7df72934/nmstate-metrics/0.log" Jan 28 14:28:28 crc kubenswrapper[4848]: I0128 14:28:28.032081 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-rbzxj_829bde53-8549-411f-a1ff-a00769198b1c/nmstate-operator/0.log" Jan 28 14:28:28 crc kubenswrapper[4848]: I0128 14:28:28.134129 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-52stv_38d465b1-a9c1-4007-8406-9fd77ec0ead4/nmstate-webhook/0.log" Jan 28 14:28:29 crc kubenswrapper[4848]: I0128 14:28:29.850364 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:28:29 crc kubenswrapper[4848]: E0128 14:28:29.851106 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:28:43 crc kubenswrapper[4848]: I0128 14:28:43.703475 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-pwsdh_021caff7-8415-451a-941e-20d025a0aa2b/prometheus-operator/0.log" Jan 28 14:28:43 crc kubenswrapper[4848]: I0128 14:28:43.944710 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn_25424d22-6211-41f8-9482-de5ca224224c/prometheus-operator-admission-webhook/0.log" Jan 28 14:28:44 crc kubenswrapper[4848]: I0128 14:28:44.023182 4848 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8_40955df6-8a58-487d-98fb-f8632536c72a/prometheus-operator-admission-webhook/0.log" Jan 28 14:28:44 crc kubenswrapper[4848]: I0128 14:28:44.146432 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-hs6jb_ec6c23a2-9920-4672-92c6-c44569e918d4/operator/0.log" Jan 28 14:28:44 crc kubenswrapper[4848]: I0128 14:28:44.290665 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-lh2xv_ff57a0c9-f0c9-4ba1-9166-37cb03178711/perses-operator/0.log" Jan 28 14:28:44 crc kubenswrapper[4848]: I0128 14:28:44.860395 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:28:44 crc kubenswrapper[4848]: E0128 14:28:44.862396 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:28:56 crc kubenswrapper[4848]: I0128 14:28:56.850772 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:28:56 crc kubenswrapper[4848]: E0128 14:28:56.851708 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.173380 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-tz8dm_ce4dce22-bb0b-4fc3-b724-edbfe04cea8b/kube-rbac-proxy/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.226950 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-tz8dm_ce4dce22-bb0b-4fc3-b724-edbfe04cea8b/controller/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.434066 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.618934 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.650622 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.686698 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.709106 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:29:00 crc 
kubenswrapper[4848]: I0128 14:29:00.948471 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.983328 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:29:00 crc kubenswrapper[4848]: I0128 14:29:00.983862 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.004215 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.243071 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/controller/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.249879 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.254863 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.271314 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.514740 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/kube-rbac-proxy-frr/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.518326 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/frr-metrics/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.522434 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/kube-rbac-proxy/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.769381 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/reloader/0.log" Jan 28 14:29:01 crc kubenswrapper[4848]: I0128 14:29:01.800162 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-kdftv_9744680c-1423-4e9a-a285-bca5722378d9/frr-k8s-webhook-server/0.log" Jan 28 14:29:02 crc kubenswrapper[4848]: I0128 14:29:02.118541 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-767fd6bd7f-8fzzq_1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b/manager/0.log" Jan 28 14:29:02 crc kubenswrapper[4848]: I0128 14:29:02.278201 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7d6997b498-j9mdf_a8573d7d-c62b-45f5-9f5c-90a45126a2f4/webhook-server/0.log" Jan 28 14:29:02 crc kubenswrapper[4848]: I0128 14:29:02.422134 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sfdg2_4645d31f-e3e8-4c7a-ace2-c82b88fd7488/kube-rbac-proxy/0.log" Jan 28 14:29:03 crc kubenswrapper[4848]: I0128 
14:29:03.140480 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sfdg2_4645d31f-e3e8-4c7a-ace2-c82b88fd7488/speaker/0.log" Jan 28 14:29:03 crc kubenswrapper[4848]: I0128 14:29:03.403745 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/frr/0.log" Jan 28 14:29:09 crc kubenswrapper[4848]: I0128 14:29:09.849753 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:29:09 crc kubenswrapper[4848]: E0128 14:29:09.850758 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.272968 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zz7q4"] Jan 28 14:29:15 crc kubenswrapper[4848]: E0128 14:29:15.274231 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="extract-utilities" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.274266 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="extract-utilities" Jan 28 14:29:15 crc kubenswrapper[4848]: E0128 14:29:15.274278 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="registry-server" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.274283 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="registry-server" Jan 28 14:29:15 crc kubenswrapper[4848]: E0128 14:29:15.274308 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="extract-content" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.274315 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="extract-content" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.274572 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="47020c65-d88b-42a4-8d62-5de343319a37" containerName="registry-server" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.276405 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.300582 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz7q4"] Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.364849 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g92bf\" (UniqueName: \"kubernetes.io/projected/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-kube-api-access-g92bf\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.364917 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-catalog-content\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.364961 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-utilities\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.467543 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g92bf\" (UniqueName: \"kubernetes.io/projected/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-kube-api-access-g92bf\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.467610 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-catalog-content\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.467652 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-utilities\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.468442 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-catalog-content\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.468576 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-utilities\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.490151 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-g92bf\" (UniqueName: \"kubernetes.io/projected/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-kube-api-access-g92bf\") pod \"redhat-operators-zz7q4\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") " pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:15 crc kubenswrapper[4848]: I0128 14:29:15.598382 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:16 crc kubenswrapper[4848]: I0128 14:29:16.153162 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz7q4"] Jan 28 14:29:16 crc kubenswrapper[4848]: I0128 14:29:16.815751 4848 generic.go:334] "Generic (PLEG): container finished" podID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerID="ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b" exitCode=0 Jan 28 14:29:16 crc kubenswrapper[4848]: I0128 14:29:16.815823 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerDied","Data":"ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b"} Jan 28 14:29:16 crc kubenswrapper[4848]: I0128 14:29:16.815867 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerStarted","Data":"878496ea40e1359c0756f1e430533be3666342dac29525807d41e34623b86078"} Jan 28 14:29:16 crc kubenswrapper[4848]: I0128 14:29:16.818655 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 14:29:18 crc kubenswrapper[4848]: I0128 14:29:18.886741 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerStarted","Data":"5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae"} Jan 28 14:29:19 crc kubenswrapper[4848]: I0128 14:29:19.729016 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/util/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.053791 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/pull/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.103762 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/util/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.111342 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/pull/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.285550 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/util/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.290502 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/pull/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.464777 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/extract/0.log" Jan 28 14:29:20 crc kubenswrapper[4848]: I0128 14:29:20.819001 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/util/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.084798 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/pull/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.094470 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/util/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.124020 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/pull/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.295345 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/util/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.353368 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/pull/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.453160 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/extract/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.508717 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/util/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.799886 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/util/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.822976 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/pull/0.log" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.850944 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:29:21 crc kubenswrapper[4848]: E0128 14:29:21.851297 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:29:21 crc kubenswrapper[4848]: I0128 14:29:21.863467 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/pull/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.065603 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/extract/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.107599 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/util/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.202066 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/pull/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.381117 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-utilities/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.631837 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-utilities/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.679392 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-content/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.710253 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-content/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.869500 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-utilities/0.log" Jan 28 14:29:22 crc kubenswrapper[4848]: I0128 14:29:22.884280 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-content/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.171854 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-utilities/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.486583 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-utilities/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.565536 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/registry-server/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.565981 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-content/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.587110 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-content/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.809280 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-content/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.858762 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-utilities/0.log" Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.931251 4848 generic.go:334] "Generic (PLEG): container finished" podID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerID="5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae" exitCode=0 Jan 28 14:29:23 crc kubenswrapper[4848]: I0128 14:29:23.931360 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerDied","Data":"5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae"} Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.207492 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-g5r8p_69959509-efcd-4928-98ad-1dcd656b5513/marketplace-operator/0.log" Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.320935 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-utilities/0.log" Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.528455 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-utilities/0.log" Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.675692 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-content/0.log" Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.777760 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-content/0.log" Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.852828 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/registry-server/0.log" Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.964507 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerStarted","Data":"edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2"} Jan 28 14:29:24 crc kubenswrapper[4848]: I0128 14:29:24.996517 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zz7q4" podStartSLOduration=2.379491438 podStartE2EDuration="9.996487916s" podCreationTimestamp="2026-01-28 14:29:15 +0000 UTC" firstStartedPulling="2026-01-28 14:29:16.818341943 +0000 UTC m=+6183.730558981" 
lastFinishedPulling="2026-01-28 14:29:24.435338421 +0000 UTC m=+6191.347555459" observedRunningTime="2026-01-28 14:29:24.994079921 +0000 UTC m=+6191.906296979" watchObservedRunningTime="2026-01-28 14:29:24.996487916 +0000 UTC m=+6191.908704954" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.330778 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-content/0.log" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.437747 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-utilities/0.log" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.585033 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-utilities/0.log" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.598628 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.598723 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.615288 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/registry-server/0.log" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.916678 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-utilities/0.log" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.946778 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-content/0.log" Jan 28 14:29:25 crc kubenswrapper[4848]: I0128 14:29:25.984054 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-content/0.log" Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.257983 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-utilities/0.log" Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.318804 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/extract-utilities/0.log" Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.387416 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-content/0.log" Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.630444 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/extract-content/0.log" Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.661016 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zz7q4" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="registry-server" probeResult="failure" output=< Jan 28 14:29:26 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" 
Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.664895 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/extract-utilities/0.log"
Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.753328 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/extract-content/0.log"
Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.802355 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/registry-server/0.log"
Jan 28 14:29:26 crc kubenswrapper[4848]: I0128 14:29:26.976116 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/extract-content/0.log"
Jan 28 14:29:27 crc kubenswrapper[4848]: I0128 14:29:27.097543 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/registry-server/0.log"
Jan 28 14:29:27 crc kubenswrapper[4848]: I0128 14:29:27.121197 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zz7q4_4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/extract-utilities/0.log"
Jan 28 14:29:35 crc kubenswrapper[4848]: I0128 14:29:35.663452 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zz7q4"
Jan 28 14:29:35 crc kubenswrapper[4848]: I0128 14:29:35.730599 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zz7q4"
Jan 28 14:29:35 crc kubenswrapper[4848]: I0128 14:29:35.850428 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:29:35 crc kubenswrapper[4848]: E0128 14:29:35.850747 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:29:35 crc kubenswrapper[4848]: I0128 14:29:35.911541 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz7q4"]
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.208380 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zz7q4" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="registry-server" containerID="cri-o://edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2" gracePeriod=2
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.776668 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz7q4"
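The recurring "CrashLoopBackOff: back-off 5m0s" errors for machine-config-daemon-vfhvz, starting above and repeating through 14:31:35, show the kubelet's per-container restart backoff at its cap: the delay starts at 10s and doubles per failed restart up to a 5-minute maximum, so each sync attempt inside the window is skipped with the same message until 14:31:49, when the window elapses and the container is restarted. A small sketch of that doubling schedule; the 10s base and 5m cap match kubelet's defaults, but the function itself is ours:

    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopDelay returns the nth restart delay: 10s, doubling to a 5m cap.
    func crashLoopDelay(restarts int) time.Duration {
        d := 10 * time.Second
        for i := 0; i < restarts && d < 5*time.Minute; i++ {
            d *= 2
        }
        if d > 5*time.Minute {
            d = 5 * time.Minute
        }
        return d
    }

    func main() {
        for n := 0; n <= 6; n++ {
            fmt.Println(n, crashLoopDelay(n)) // 10s 20s 40s 1m20s 2m40s 5m0s 5m0s
        }
    }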
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.906239 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-utilities\") pod \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") "
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.906393 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-catalog-content\") pod \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") "
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.906589 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g92bf\" (UniqueName: \"kubernetes.io/projected/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-kube-api-access-g92bf\") pod \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\" (UID: \"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353\") "
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.907093 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-utilities" (OuterVolumeSpecName: "utilities") pod "4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" (UID: "4ff1299e-dd1b-4cbf-a24d-7e8a761b2353"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.907687 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 14:29:37 crc kubenswrapper[4848]: I0128 14:29:37.917434 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-kube-api-access-g92bf" (OuterVolumeSpecName: "kube-api-access-g92bf") pod "4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" (UID: "4ff1299e-dd1b-4cbf-a24d-7e8a761b2353"). InnerVolumeSpecName "kube-api-access-g92bf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.010726 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g92bf\" (UniqueName: \"kubernetes.io/projected/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-kube-api-access-g92bf\") on node \"crc\" DevicePath \"\""
Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.053880 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" (UID: "4ff1299e-dd1b-4cbf-a24d-7e8a761b2353"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.112653 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.229886 4848 generic.go:334] "Generic (PLEG): container finished" podID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerID="edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2" exitCode=0 Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.229955 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerDied","Data":"edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2"} Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.230005 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz7q4" event={"ID":"4ff1299e-dd1b-4cbf-a24d-7e8a761b2353","Type":"ContainerDied","Data":"878496ea40e1359c0756f1e430533be3666342dac29525807d41e34623b86078"} Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.230013 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz7q4" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.230039 4848 scope.go:117] "RemoveContainer" containerID="edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.275798 4848 scope.go:117] "RemoveContainer" containerID="5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.342430 4848 scope.go:117] "RemoveContainer" containerID="ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.348625 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz7q4"] Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.359696 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zz7q4"] Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.376012 4848 scope.go:117] "RemoveContainer" containerID="edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2" Jan 28 14:29:38 crc kubenswrapper[4848]: E0128 14:29:38.378936 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2\": container with ID starting with edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2 not found: ID does not exist" containerID="edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2" Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.379019 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2"} err="failed to get container status \"edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2\": rpc error: code = NotFound desc = could not find container \"edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2\": container with ID starting with edd1ade7127a75faa690715b0882b06b7971549822576db74d0f9b9e70e791b2 not found: ID does not exist" Jan 28 14:29:38 crc 
Jan 28 14:29:38 crc kubenswrapper[4848]: E0128 14:29:38.380232 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae\": container with ID starting with 5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae not found: ID does not exist" containerID="5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae"
Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.380276 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae"} err="failed to get container status \"5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae\": rpc error: code = NotFound desc = could not find container \"5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae\": container with ID starting with 5dce3e5daf496c314120b273b05570028b6d5ffffad62934b69c192333388bae not found: ID does not exist"
Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.380297 4848 scope.go:117] "RemoveContainer" containerID="ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b"
Jan 28 14:29:38 crc kubenswrapper[4848]: E0128 14:29:38.380557 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b\": container with ID starting with ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b not found: ID does not exist" containerID="ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b"
Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.380586 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b"} err="failed to get container status \"ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b\": rpc error: code = NotFound desc = could not find container \"ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b\": container with ID starting with ebbb57fc7d40436eafa5fc51dbf675f4e459c2485b22474a802559e245fe2e1b not found: ID does not exist"
Jan 28 14:29:38 crc kubenswrapper[4848]: I0128 14:29:38.863885 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" path="/var/lib/kubelet/pods/4ff1299e-dd1b-4cbf-a24d-7e8a761b2353/volumes"
Jan 28 14:29:42 crc kubenswrapper[4848]: I0128 14:29:42.140995 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-pwsdh_021caff7-8415-451a-941e-20d025a0aa2b/prometheus-operator/0.log"
Jan 28 14:29:42 crc kubenswrapper[4848]: I0128 14:29:42.146216 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8_40955df6-8a58-487d-98fb-f8632536c72a/prometheus-operator-admission-webhook/0.log"
Jan 28 14:29:42 crc kubenswrapper[4848]: I0128 14:29:42.160188 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn_25424d22-6211-41f8-9482-de5ca224224c/prometheus-operator-admission-webhook/0.log"
Jan 28 14:29:42 crc kubenswrapper[4848]: I0128 14:29:42.384777 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-lh2xv_ff57a0c9-f0c9-4ba1-9166-37cb03178711/perses-operator/0.log"
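The E/I pairs above (a "ContainerStatus from runtime service failed ... NotFound" error followed by "DeleteContainer returned error") are benign: the containers were already removed, a second cleanup pass asked CRI-O for their status, and the runtime answered NotFound, which the kubelet logs and swallows because deletion is idempotent. A hedged sketch of that pattern against the gRPC status API; the function is ours, not kubelet's code:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer treats a NotFound from the runtime as "already gone".
    func removeContainer(remove func(id string) error, id string) error {
        if err := remove(id); err != nil {
            if status.Code(err) == codes.NotFound {
                fmt.Printf("container %s already removed\n", id)
                return nil // idempotent delete: nothing left to do
            }
            return err // any other error is a real failure
        }
        return nil
    }

    func main() {
        notFound := func(id string) error {
            return status.Error(codes.NotFound, "could not find container "+id)
        }
        if err := removeContainer(notFound, "edd1ade7"); err != nil {
            panic(err)
        }
    }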
Jan 28 14:29:42 crc kubenswrapper[4848]: I0128 14:29:42.428984 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-hs6jb_ec6c23a2-9920-4672-92c6-c44569e918d4/operator/0.log"
Jan 28 14:29:46 crc kubenswrapper[4848]: I0128 14:29:46.850928 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:29:46 crc kubenswrapper[4848]: E0128 14:29:46.852398 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.167886 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"]
Jan 28 14:30:00 crc kubenswrapper[4848]: E0128 14:30:00.169175 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="extract-content"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.169196 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="extract-content"
Jan 28 14:30:00 crc kubenswrapper[4848]: E0128 14:30:00.169535 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="registry-server"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.169546 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="registry-server"
Jan 28 14:30:00 crc kubenswrapper[4848]: E0128 14:30:00.169564 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="extract-utilities"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.169573 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="extract-utilities"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.169838 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ff1299e-dd1b-4cbf-a24d-7e8a761b2353" containerName="registry-server"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.171262 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.176651 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.176992 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.185781 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"]
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.318239 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45210f69-d6f9-4366-9fa1-7d63d168ada6-secret-volume\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.318786 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45210f69-d6f9-4366-9fa1-7d63d168ada6-config-volume\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.318823 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25hzn\" (UniqueName: \"kubernetes.io/projected/45210f69-d6f9-4366-9fa1-7d63d168ada6-kube-api-access-25hzn\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.421273 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45210f69-d6f9-4366-9fa1-7d63d168ada6-config-volume\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.421345 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25hzn\" (UniqueName: \"kubernetes.io/projected/45210f69-d6f9-4366-9fa1-7d63d168ada6-kube-api-access-25hzn\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.421499 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45210f69-d6f9-4366-9fa1-7d63d168ada6-secret-volume\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.422538 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45210f69-d6f9-4366-9fa1-7d63d168ada6-config-volume\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"
\"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.432391 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45210f69-d6f9-4366-9fa1-7d63d168ada6-secret-volume\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.448933 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25hzn\" (UniqueName: \"kubernetes.io/projected/45210f69-d6f9-4366-9fa1-7d63d168ada6-kube-api-access-25hzn\") pod \"collect-profiles-29493510-dgtxv\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.525745 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" Jan 28 14:30:00 crc kubenswrapper[4848]: I0128 14:30:00.850577 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:30:00 crc kubenswrapper[4848]: E0128 14:30:00.851372 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:30:01 crc kubenswrapper[4848]: I0128 14:30:01.217190 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv"] Jan 28 14:30:01 crc kubenswrapper[4848]: I0128 14:30:01.510276 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" event={"ID":"45210f69-d6f9-4366-9fa1-7d63d168ada6","Type":"ContainerStarted","Data":"415408010b4f5d0d2da22a2a32c2a77a7074f9cb9cb18427436f5b263b6d0b74"} Jan 28 14:30:01 crc kubenswrapper[4848]: I0128 14:30:01.511047 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" event={"ID":"45210f69-d6f9-4366-9fa1-7d63d168ada6","Type":"ContainerStarted","Data":"7e3e19c37ccae88f87234e1538b9811389e38c8901634cc3dec975f81ed87d61"} Jan 28 14:30:01 crc kubenswrapper[4848]: I0128 14:30:01.533514 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" podStartSLOduration=1.5334901859999999 podStartE2EDuration="1.533490186s" podCreationTimestamp="2026-01-28 14:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 14:30:01.525004704 +0000 UTC m=+6228.437221742" watchObservedRunningTime="2026-01-28 14:30:01.533490186 +0000 UTC m=+6228.445707224" Jan 28 14:30:02 crc kubenswrapper[4848]: I0128 14:30:02.527622 4848 generic.go:334] "Generic (PLEG): container finished" podID="45210f69-d6f9-4366-9fa1-7d63d168ada6" 
containerID="415408010b4f5d0d2da22a2a32c2a77a7074f9cb9cb18427436f5b263b6d0b74" exitCode=0 Jan 28 14:30:02 crc kubenswrapper[4848]: I0128 14:30:02.527673 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" event={"ID":"45210f69-d6f9-4366-9fa1-7d63d168ada6","Type":"ContainerDied","Data":"415408010b4f5d0d2da22a2a32c2a77a7074f9cb9cb18427436f5b263b6d0b74"} Jan 28 14:30:03 crc kubenswrapper[4848]: I0128 14:30:03.948771 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.056878 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25hzn\" (UniqueName: \"kubernetes.io/projected/45210f69-d6f9-4366-9fa1-7d63d168ada6-kube-api-access-25hzn\") pod \"45210f69-d6f9-4366-9fa1-7d63d168ada6\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.057063 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45210f69-d6f9-4366-9fa1-7d63d168ada6-secret-volume\") pod \"45210f69-d6f9-4366-9fa1-7d63d168ada6\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.057151 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45210f69-d6f9-4366-9fa1-7d63d168ada6-config-volume\") pod \"45210f69-d6f9-4366-9fa1-7d63d168ada6\" (UID: \"45210f69-d6f9-4366-9fa1-7d63d168ada6\") " Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.064219 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45210f69-d6f9-4366-9fa1-7d63d168ada6-config-volume" (OuterVolumeSpecName: "config-volume") pod "45210f69-d6f9-4366-9fa1-7d63d168ada6" (UID: "45210f69-d6f9-4366-9fa1-7d63d168ada6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.070371 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45210f69-d6f9-4366-9fa1-7d63d168ada6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "45210f69-d6f9-4366-9fa1-7d63d168ada6" (UID: "45210f69-d6f9-4366-9fa1-7d63d168ada6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.081689 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45210f69-d6f9-4366-9fa1-7d63d168ada6-kube-api-access-25hzn" (OuterVolumeSpecName: "kube-api-access-25hzn") pod "45210f69-d6f9-4366-9fa1-7d63d168ada6" (UID: "45210f69-d6f9-4366-9fa1-7d63d168ada6"). InnerVolumeSpecName "kube-api-access-25hzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.161795 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45210f69-d6f9-4366-9fa1-7d63d168ada6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.161866 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45210f69-d6f9-4366-9fa1-7d63d168ada6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.161902 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25hzn\" (UniqueName: \"kubernetes.io/projected/45210f69-d6f9-4366-9fa1-7d63d168ada6-kube-api-access-25hzn\") on node \"crc\" DevicePath \"\"" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.248338 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"] Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.264127 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493465-57wrh"] Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.550702 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493510-dgtxv" event={"ID":"45210f69-d6f9-4366-9fa1-7d63d168ada6","Type":"ContainerDied","Data":"7e3e19c37ccae88f87234e1538b9811389e38c8901634cc3dec975f81ed87d61"} Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.551150 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e3e19c37ccae88f87234e1538b9811389e38c8901634cc3dec975f81ed87d61" Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.550743 4848 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 14:30:04 crc kubenswrapper[4848]: I0128 14:30:04.864169 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="068b97ab-42f7-4b69-bca2-1cd58c1298ae" path="/var/lib/kubelet/pods/068b97ab-42f7-4b69-bca2-1cd58c1298ae/volumes"
Jan 28 14:30:08 crc kubenswrapper[4848]: I0128 14:30:08.102093 4848 scope.go:117] "RemoveContainer" containerID="3085267885a4c81fa866e651847a7a285cee48bc4ffad715fc0cbd136e91f8dd"
Jan 28 14:30:14 crc kubenswrapper[4848]: I0128 14:30:14.858116 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:30:14 crc kubenswrapper[4848]: E0128 14:30:14.859103 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:30:27 crc kubenswrapper[4848]: I0128 14:30:27.868023 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:30:27 crc kubenswrapper[4848]: E0128 14:30:27.869824 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:30:42 crc kubenswrapper[4848]: I0128 14:30:42.853029 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:30:42 crc kubenswrapper[4848]: E0128 14:30:42.854097 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:30:57 crc kubenswrapper[4848]: I0128 14:30:57.851208 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
Jan 28 14:30:57 crc kubenswrapper[4848]: E0128 14:30:57.852308 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:31:08 crc kubenswrapper[4848]: I0128 14:31:08.194472 4848 scope.go:117] "RemoveContainer" containerID="b876c30536bc9d3f55f1adcd299e0056dace96ab120b9c9bb01e513a5324f1b4"
Jan 28 14:31:11 crc kubenswrapper[4848]: I0128 14:31:11.849707 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219"
containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:31:11 crc kubenswrapper[4848]: E0128 14:31:11.852238 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:31:22 crc kubenswrapper[4848]: I0128 14:31:22.898310 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:31:22 crc kubenswrapper[4848]: E0128 14:31:22.902085 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:31:35 crc kubenswrapper[4848]: I0128 14:31:35.850447 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:31:35 crc kubenswrapper[4848]: E0128 14:31:35.851713 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:31:49 crc kubenswrapper[4848]: I0128 14:31:49.851815 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:31:50 crc kubenswrapper[4848]: I0128 14:31:50.315142 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"490c280e1157715c8bda663dcfbeb8881688ac0226ce28dcc3e38c2ea7cab704"} Jan 28 14:32:08 crc kubenswrapper[4848]: I0128 14:32:08.302360 4848 scope.go:117] "RemoveContainer" containerID="f39eb12bb9b1a4002d2c0fe8c03f58a6437adc226c7d2dd6d2536a771183b769" Jan 28 14:32:08 crc kubenswrapper[4848]: I0128 14:32:08.536832 4848 generic.go:334] "Generic (PLEG): container finished" podID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerID="5c9941bb402897ef4e6a4e943aad058d65b90eb5240e7d8bcec35f872573f084" exitCode=0 Jan 28 14:32:08 crc kubenswrapper[4848]: I0128 14:32:08.536926 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-29w6z/must-gather-wb4fr" event={"ID":"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760","Type":"ContainerDied","Data":"5c9941bb402897ef4e6a4e943aad058d65b90eb5240e7d8bcec35f872573f084"} Jan 28 14:32:08 crc kubenswrapper[4848]: I0128 14:32:08.537991 4848 scope.go:117] "RemoveContainer" containerID="5c9941bb402897ef4e6a4e943aad058d65b90eb5240e7d8bcec35f872573f084" Jan 28 14:32:08 crc kubenswrapper[4848]: I0128 14:32:08.826209 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-29w6z_must-gather-wb4fr_09dbd83c-c4e8-46d1-9b6e-1724a6e7e760/gather/0.log" Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.269875 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-29w6z/must-gather-wb4fr"] Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.270897 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-29w6z/must-gather-wb4fr" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="copy" containerID="cri-o://21132d3d2d36017e29e4a4b15892a72e6dd3827d7c62b837d01d41f77b30b20c" gracePeriod=2 Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.281162 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-29w6z/must-gather-wb4fr"] Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.666237 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-29w6z_must-gather-wb4fr_09dbd83c-c4e8-46d1-9b6e-1724a6e7e760/copy/0.log" Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.667032 4848 generic.go:334] "Generic (PLEG): container finished" podID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerID="21132d3d2d36017e29e4a4b15892a72e6dd3827d7c62b837d01d41f77b30b20c" exitCode=143 Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.787441 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-29w6z_must-gather-wb4fr_09dbd83c-c4e8-46d1-9b6e-1724a6e7e760/copy/0.log" Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.788325 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.828008 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn5z2\" (UniqueName: \"kubernetes.io/projected/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-kube-api-access-sn5z2\") pod \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.828829 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-must-gather-output\") pod \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\" (UID: \"09dbd83c-c4e8-46d1-9b6e-1724a6e7e760\") " Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.835830 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-kube-api-access-sn5z2" (OuterVolumeSpecName: "kube-api-access-sn5z2") pod "09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" (UID: "09dbd83c-c4e8-46d1-9b6e-1724a6e7e760"). InnerVolumeSpecName "kube-api-access-sn5z2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:32:18 crc kubenswrapper[4848]: I0128 14:32:18.933125 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn5z2\" (UniqueName: \"kubernetes.io/projected/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-kube-api-access-sn5z2\") on node \"crc\" DevicePath \"\"" Jan 28 14:32:19 crc kubenswrapper[4848]: I0128 14:32:19.091105 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" (UID: "09dbd83c-c4e8-46d1-9b6e-1724a6e7e760"). 
InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:32:19 crc kubenswrapper[4848]: I0128 14:32:19.137600 4848 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 28 14:32:19 crc kubenswrapper[4848]: I0128 14:32:19.681418 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-29w6z_must-gather-wb4fr_09dbd83c-c4e8-46d1-9b6e-1724a6e7e760/copy/0.log" Jan 28 14:32:19 crc kubenswrapper[4848]: I0128 14:32:19.682130 4848 scope.go:117] "RemoveContainer" containerID="21132d3d2d36017e29e4a4b15892a72e6dd3827d7c62b837d01d41f77b30b20c" Jan 28 14:32:19 crc kubenswrapper[4848]: I0128 14:32:19.682178 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-29w6z/must-gather-wb4fr" Jan 28 14:32:19 crc kubenswrapper[4848]: I0128 14:32:19.717981 4848 scope.go:117] "RemoveContainer" containerID="5c9941bb402897ef4e6a4e943aad058d65b90eb5240e7d8bcec35f872573f084" Jan 28 14:32:20 crc kubenswrapper[4848]: I0128 14:32:20.874215 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" path="/var/lib/kubelet/pods/09dbd83c-c4e8-46d1-9b6e-1724a6e7e760/volumes" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.245898 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zsv9k"] Jan 28 14:32:24 crc kubenswrapper[4848]: E0128 14:32:24.247646 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="gather" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.247669 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="gather" Jan 28 14:32:24 crc kubenswrapper[4848]: E0128 14:32:24.247689 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45210f69-d6f9-4366-9fa1-7d63d168ada6" containerName="collect-profiles" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.247698 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="45210f69-d6f9-4366-9fa1-7d63d168ada6" containerName="collect-profiles" Jan 28 14:32:24 crc kubenswrapper[4848]: E0128 14:32:24.247728 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="copy" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.247737 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="copy" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.248037 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="gather" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.248056 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="45210f69-d6f9-4366-9fa1-7d63d168ada6" containerName="collect-profiles" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.248069 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="09dbd83c-c4e8-46d1-9b6e-1724a6e7e760" containerName="copy" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.250458 4848 util.go:30] "No sandbox for pod can be found. 
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.266955 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zsv9k"]
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.308739 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f5jp\" (UniqueName: \"kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.308804 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-catalog-content\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.308857 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-utilities\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.411130 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-utilities\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.411451 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f5jp\" (UniqueName: \"kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.411494 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-catalog-content\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.411896 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-utilities\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.412050 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-catalog-content\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.482546 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f5jp\" (UniqueName: \"kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k"
"MountVolume.SetUp succeeded for volume \"kube-api-access-4f5jp\" (UniqueName: \"kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp\") pod \"community-operators-zsv9k\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:24 crc kubenswrapper[4848]: I0128 14:32:24.575853 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:25 crc kubenswrapper[4848]: I0128 14:32:25.334222 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zsv9k"] Jan 28 14:32:25 crc kubenswrapper[4848]: I0128 14:32:25.768806 4848 generic.go:334] "Generic (PLEG): container finished" podID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerID="3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6" exitCode=0 Jan 28 14:32:25 crc kubenswrapper[4848]: I0128 14:32:25.768976 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerDied","Data":"3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6"} Jan 28 14:32:25 crc kubenswrapper[4848]: I0128 14:32:25.769314 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerStarted","Data":"93db8e7153ded9b2e236c8f7bbbfa659e7bc3a0e4f4fc6aad993ef0607fffb85"} Jan 28 14:32:27 crc kubenswrapper[4848]: I0128 14:32:27.799326 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerStarted","Data":"fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67"} Jan 28 14:32:28 crc kubenswrapper[4848]: I0128 14:32:28.812365 4848 generic.go:334] "Generic (PLEG): container finished" podID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerID="fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67" exitCode=0 Jan 28 14:32:28 crc kubenswrapper[4848]: I0128 14:32:28.812475 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerDied","Data":"fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67"} Jan 28 14:32:29 crc kubenswrapper[4848]: I0128 14:32:29.825712 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerStarted","Data":"245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b"} Jan 28 14:32:29 crc kubenswrapper[4848]: I0128 14:32:29.855982 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zsv9k" podStartSLOduration=2.368643893 podStartE2EDuration="5.855958546s" podCreationTimestamp="2026-01-28 14:32:24 +0000 UTC" firstStartedPulling="2026-01-28 14:32:25.771998128 +0000 UTC m=+6372.684215166" lastFinishedPulling="2026-01-28 14:32:29.259312781 +0000 UTC m=+6376.171529819" observedRunningTime="2026-01-28 14:32:29.847674739 +0000 UTC m=+6376.759891787" watchObservedRunningTime="2026-01-28 14:32:29.855958546 +0000 UTC m=+6376.768175584" Jan 28 14:32:34 crc kubenswrapper[4848]: I0128 14:32:34.576312 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:34 crc kubenswrapper[4848]: I0128 14:32:34.576937 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:34 crc kubenswrapper[4848]: I0128 14:32:34.643339 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:34 crc kubenswrapper[4848]: I0128 14:32:34.942484 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:35 crc kubenswrapper[4848]: I0128 14:32:35.028757 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zsv9k"] Jan 28 14:32:36 crc kubenswrapper[4848]: I0128 14:32:36.898905 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zsv9k" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="registry-server" containerID="cri-o://245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b" gracePeriod=2 Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.486627 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.579492 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-catalog-content\") pod \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.579613 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-utilities\") pod \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.580668 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-utilities" (OuterVolumeSpecName: "utilities") pod "5252c050-253b-4fe4-ace5-5c99f98cb3bc" (UID: "5252c050-253b-4fe4-ace5-5c99f98cb3bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.580779 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f5jp\" (UniqueName: \"kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp\") pod \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\" (UID: \"5252c050-253b-4fe4-ace5-5c99f98cb3bc\") " Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.582472 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.591831 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp" (OuterVolumeSpecName: "kube-api-access-4f5jp") pod "5252c050-253b-4fe4-ace5-5c99f98cb3bc" (UID: "5252c050-253b-4fe4-ace5-5c99f98cb3bc"). InnerVolumeSpecName "kube-api-access-4f5jp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.646048 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5252c050-253b-4fe4-ace5-5c99f98cb3bc" (UID: "5252c050-253b-4fe4-ace5-5c99f98cb3bc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.685625 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5252c050-253b-4fe4-ace5-5c99f98cb3bc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.685661 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f5jp\" (UniqueName: \"kubernetes.io/projected/5252c050-253b-4fe4-ace5-5c99f98cb3bc-kube-api-access-4f5jp\") on node \"crc\" DevicePath \"\"" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.911779 4848 generic.go:334] "Generic (PLEG): container finished" podID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerID="245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b" exitCode=0 Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.911835 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerDied","Data":"245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b"} Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.911861 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zsv9k" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.911885 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zsv9k" event={"ID":"5252c050-253b-4fe4-ace5-5c99f98cb3bc","Type":"ContainerDied","Data":"93db8e7153ded9b2e236c8f7bbbfa659e7bc3a0e4f4fc6aad993ef0607fffb85"} Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.911906 4848 scope.go:117] "RemoveContainer" containerID="245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.962723 4848 scope.go:117] "RemoveContainer" containerID="fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67" Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.968144 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zsv9k"] Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.977712 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zsv9k"] Jan 28 14:32:37 crc kubenswrapper[4848]: I0128 14:32:37.996201 4848 scope.go:117] "RemoveContainer" containerID="3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6" Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.058584 4848 scope.go:117] "RemoveContainer" containerID="245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b" Jan 28 14:32:38 crc kubenswrapper[4848]: E0128 14:32:38.059592 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b\": container with ID starting with 
Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.059652 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b"} err="failed to get container status \"245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b\": rpc error: code = NotFound desc = could not find container \"245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b\": container with ID starting with 245a946539cc0eadea96d9211cb08953d63478ea640bfad41cae7c5fd44d605b not found: ID does not exist"
Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.059691 4848 scope.go:117] "RemoveContainer" containerID="fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67"
Jan 28 14:32:38 crc kubenswrapper[4848]: E0128 14:32:38.060197 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67\": container with ID starting with fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67 not found: ID does not exist" containerID="fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67"
Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.060280 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67"} err="failed to get container status \"fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67\": rpc error: code = NotFound desc = could not find container \"fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67\": container with ID starting with fe12d45c13034b34737497eda02b46a55783cf1231e28a516ed0b67b2f2d1b67 not found: ID does not exist"
Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.060317 4848 scope.go:117] "RemoveContainer" containerID="3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6"
Jan 28 14:32:38 crc kubenswrapper[4848]: E0128 14:32:38.060636 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6\": container with ID starting with 3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6 not found: ID does not exist" containerID="3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6"
Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.060675 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6"} err="failed to get container status \"3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6\": rpc error: code = NotFound desc = could not find container \"3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6\": container with ID starting with 3a2471214e58f24ae54f4fc08ee8039824caeed561c027485a3e049c61b72dd6 not found: ID does not exist"
Jan 28 14:32:38 crc kubenswrapper[4848]: I0128 14:32:38.865270 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" path="/var/lib/kubelet/pods/5252c050-253b-4fe4-ace5-5c99f98cb3bc/volumes"
Jan 28 14:34:07 crc kubenswrapper[4848]: I0128 14:34:07.924185 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:34:07 crc kubenswrapper[4848]: I0128 14:34:07.924798 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:34:37 crc kubenswrapper[4848]: I0128 14:34:37.924792 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:34:37 crc kubenswrapper[4848]: I0128 14:34:37.925764 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:35:07 crc kubenswrapper[4848]: I0128 14:35:07.924964 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:35:07 crc kubenswrapper[4848]: I0128 14:35:07.925579 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:35:07 crc kubenswrapper[4848]: I0128 14:35:07.925652 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:35:07 crc kubenswrapper[4848]: I0128 14:35:07.926645 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"490c280e1157715c8bda663dcfbeb8881688ac0226ce28dcc3e38c2ea7cab704"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:35:07 crc kubenswrapper[4848]: I0128 14:35:07.926719 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://490c280e1157715c8bda663dcfbeb8881688ac0226ce28dcc3e38c2ea7cab704" gracePeriod=600 Jan 28 14:35:09 crc kubenswrapper[4848]: I0128 14:35:09.042219 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="490c280e1157715c8bda663dcfbeb8881688ac0226ce28dcc3e38c2ea7cab704" exitCode=0 Jan 28 14:35:09 crc kubenswrapper[4848]: I0128 14:35:09.042468 4848 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"490c280e1157715c8bda663dcfbeb8881688ac0226ce28dcc3e38c2ea7cab704"} Jan 28 14:35:09 crc kubenswrapper[4848]: I0128 14:35:09.043060 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"} Jan 28 14:35:09 crc kubenswrapper[4848]: I0128 14:35:09.043099 4848 scope.go:117] "RemoveContainer" containerID="c6cd0fa5c6f4395b0a88628890fe3855667402d73e9fd36c330ccae57e8ed219" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.552098 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ffllp/must-gather-zvt4j"] Jan 28 14:35:34 crc kubenswrapper[4848]: E0128 14:35:34.553365 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="registry-server" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.553379 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="registry-server" Jan 28 14:35:34 crc kubenswrapper[4848]: E0128 14:35:34.553399 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="extract-utilities" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.553407 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="extract-utilities" Jan 28 14:35:34 crc kubenswrapper[4848]: E0128 14:35:34.553424 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="extract-content" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.553431 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="extract-content" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.553662 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5252c050-253b-4fe4-ace5-5c99f98cb3bc" containerName="registry-server" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.554852 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.559410 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-ffllp"/"openshift-service-ca.crt" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.559516 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-ffllp"/"kube-root-ca.crt" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.561596 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-ffllp"/"default-dockercfg-vbdgp" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.601385 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-ffllp/must-gather-zvt4j"] Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.633898 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdtsh\" (UniqueName: \"kubernetes.io/projected/b9fee44b-257e-4daa-9611-3216ef6be666-kube-api-access-qdtsh\") pod \"must-gather-zvt4j\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") " pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.634032 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b9fee44b-257e-4daa-9611-3216ef6be666-must-gather-output\") pod \"must-gather-zvt4j\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") " pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.738328 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdtsh\" (UniqueName: \"kubernetes.io/projected/b9fee44b-257e-4daa-9611-3216ef6be666-kube-api-access-qdtsh\") pod \"must-gather-zvt4j\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") " pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.738507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b9fee44b-257e-4daa-9611-3216ef6be666-must-gather-output\") pod \"must-gather-zvt4j\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") " pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.739039 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b9fee44b-257e-4daa-9611-3216ef6be666-must-gather-output\") pod \"must-gather-zvt4j\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") " pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.779123 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdtsh\" (UniqueName: \"kubernetes.io/projected/b9fee44b-257e-4daa-9611-3216ef6be666-kube-api-access-qdtsh\") pod \"must-gather-zvt4j\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") " pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:34 crc kubenswrapper[4848]: I0128 14:35:34.882008 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/must-gather-zvt4j" Jan 28 14:35:35 crc kubenswrapper[4848]: I0128 14:35:35.551750 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-ffllp/must-gather-zvt4j"] Jan 28 14:35:36 crc kubenswrapper[4848]: I0128 14:35:36.426182 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/must-gather-zvt4j" event={"ID":"b9fee44b-257e-4daa-9611-3216ef6be666","Type":"ContainerStarted","Data":"765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c"} Jan 28 14:35:36 crc kubenswrapper[4848]: I0128 14:35:36.427126 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/must-gather-zvt4j" event={"ID":"b9fee44b-257e-4daa-9611-3216ef6be666","Type":"ContainerStarted","Data":"1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"} Jan 28 14:35:36 crc kubenswrapper[4848]: I0128 14:35:36.427167 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/must-gather-zvt4j" event={"ID":"b9fee44b-257e-4daa-9611-3216ef6be666","Type":"ContainerStarted","Data":"9c27fc602f87f818317e30026fc9e4390261a2c1d60a5e1f3ce595720574fe9c"} Jan 28 14:35:36 crc kubenswrapper[4848]: I0128 14:35:36.450480 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-ffllp/must-gather-zvt4j" podStartSLOduration=2.45045675 podStartE2EDuration="2.45045675s" podCreationTimestamp="2026-01-28 14:35:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 14:35:36.44570977 +0000 UTC m=+6563.357926818" watchObservedRunningTime="2026-01-28 14:35:36.45045675 +0000 UTC m=+6563.362673788" Jan 28 14:35:40 crc kubenswrapper[4848]: I0128 14:35:40.937361 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ffllp/crc-debug-frzrw"] Jan 28 14:35:40 crc kubenswrapper[4848]: I0128 14:35:40.939708 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:40 crc kubenswrapper[4848]: I0128 14:35:40.971497 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/26f6ab6e-20ec-478a-be18-61bb5377a9dd-host\") pod \"crc-debug-frzrw\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:40 crc kubenswrapper[4848]: I0128 14:35:40.971642 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqlcf\" (UniqueName: \"kubernetes.io/projected/26f6ab6e-20ec-478a-be18-61bb5377a9dd-kube-api-access-nqlcf\") pod \"crc-debug-frzrw\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:41 crc kubenswrapper[4848]: I0128 14:35:41.073629 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/26f6ab6e-20ec-478a-be18-61bb5377a9dd-host\") pod \"crc-debug-frzrw\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:41 crc kubenswrapper[4848]: I0128 14:35:41.074095 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqlcf\" (UniqueName: \"kubernetes.io/projected/26f6ab6e-20ec-478a-be18-61bb5377a9dd-kube-api-access-nqlcf\") pod \"crc-debug-frzrw\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:41 crc kubenswrapper[4848]: I0128 14:35:41.073797 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/26f6ab6e-20ec-478a-be18-61bb5377a9dd-host\") pod \"crc-debug-frzrw\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:41 crc kubenswrapper[4848]: I0128 14:35:41.097404 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqlcf\" (UniqueName: \"kubernetes.io/projected/26f6ab6e-20ec-478a-be18-61bb5377a9dd-kube-api-access-nqlcf\") pod \"crc-debug-frzrw\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:41 crc kubenswrapper[4848]: I0128 14:35:41.266700 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:35:41 crc kubenswrapper[4848]: I0128 14:35:41.496079 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-frzrw" event={"ID":"26f6ab6e-20ec-478a-be18-61bb5377a9dd","Type":"ContainerStarted","Data":"d82abf8f6f77b4fd3966e4454d446d0e215b523d36da00621e07d173f035213a"} Jan 28 14:35:42 crc kubenswrapper[4848]: I0128 14:35:42.511630 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-frzrw" event={"ID":"26f6ab6e-20ec-478a-be18-61bb5377a9dd","Type":"ContainerStarted","Data":"4bdb4c59ab77405f74948ff9dc800611074f077efaf75b148bcc170b95fa1570"} Jan 28 14:35:42 crc kubenswrapper[4848]: I0128 14:35:42.529517 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-ffllp/crc-debug-frzrw" podStartSLOduration=2.529485421 podStartE2EDuration="2.529485421s" podCreationTimestamp="2026-01-28 14:35:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 14:35:42.527948439 +0000 UTC m=+6569.440165497" watchObservedRunningTime="2026-01-28 14:35:42.529485421 +0000 UTC m=+6569.441702499" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.411637 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lnp5s"] Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.415795 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.456392 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lnp5s"] Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.472935 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-catalog-content\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.472993 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pxqq\" (UniqueName: \"kubernetes.io/projected/afd358f2-b4b7-4276-b233-77af442ebb29-kube-api-access-8pxqq\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.473023 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-utilities\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.575475 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-catalog-content\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.575759 4848 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-8pxqq\" (UniqueName: \"kubernetes.io/projected/afd358f2-b4b7-4276-b233-77af442ebb29-kube-api-access-8pxqq\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.575782 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-utilities\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.576044 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-catalog-content\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.576281 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-utilities\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.615478 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pxqq\" (UniqueName: \"kubernetes.io/projected/afd358f2-b4b7-4276-b233-77af442ebb29-kube-api-access-8pxqq\") pod \"redhat-marketplace-lnp5s\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:09 crc kubenswrapper[4848]: I0128 14:36:09.755746 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:10 crc kubenswrapper[4848]: I0128 14:36:10.350073 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lnp5s"] Jan 28 14:36:10 crc kubenswrapper[4848]: W0128 14:36:10.358699 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podafd358f2_b4b7_4276_b233_77af442ebb29.slice/crio-5bb5ff37dcc7c7673de4b0c97a83e6ae5cd6d3ed82cb6735d968a864a6df2325 WatchSource:0}: Error finding container 5bb5ff37dcc7c7673de4b0c97a83e6ae5cd6d3ed82cb6735d968a864a6df2325: Status 404 returned error can't find the container with id 5bb5ff37dcc7c7673de4b0c97a83e6ae5cd6d3ed82cb6735d968a864a6df2325 Jan 28 14:36:10 crc kubenswrapper[4848]: I0128 14:36:10.848298 4848 generic.go:334] "Generic (PLEG): container finished" podID="afd358f2-b4b7-4276-b233-77af442ebb29" containerID="171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a" exitCode=0 Jan 28 14:36:10 crc kubenswrapper[4848]: I0128 14:36:10.848478 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerDied","Data":"171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a"} Jan 28 14:36:10 crc kubenswrapper[4848]: I0128 14:36:10.848596 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerStarted","Data":"5bb5ff37dcc7c7673de4b0c97a83e6ae5cd6d3ed82cb6735d968a864a6df2325"} Jan 28 14:36:10 crc kubenswrapper[4848]: I0128 14:36:10.852889 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 14:36:11 crc kubenswrapper[4848]: I0128 14:36:11.862357 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerStarted","Data":"e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa"} Jan 28 14:36:12 crc kubenswrapper[4848]: I0128 14:36:12.872095 4848 generic.go:334] "Generic (PLEG): container finished" podID="afd358f2-b4b7-4276-b233-77af442ebb29" containerID="e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa" exitCode=0 Jan 28 14:36:12 crc kubenswrapper[4848]: I0128 14:36:12.872545 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerDied","Data":"e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa"} Jan 28 14:36:13 crc kubenswrapper[4848]: I0128 14:36:13.884302 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerStarted","Data":"844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4"} Jan 28 14:36:13 crc kubenswrapper[4848]: I0128 14:36:13.910836 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lnp5s" podStartSLOduration=2.381442346 podStartE2EDuration="4.910813812s" podCreationTimestamp="2026-01-28 14:36:09 +0000 UTC" firstStartedPulling="2026-01-28 14:36:10.852665833 +0000 UTC m=+6597.764882871" lastFinishedPulling="2026-01-28 14:36:13.382037299 +0000 UTC m=+6600.294254337" 
observedRunningTime="2026-01-28 14:36:13.902602268 +0000 UTC m=+6600.814819316" watchObservedRunningTime="2026-01-28 14:36:13.910813812 +0000 UTC m=+6600.823030840" Jan 28 14:36:19 crc kubenswrapper[4848]: I0128 14:36:19.756307 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:19 crc kubenswrapper[4848]: I0128 14:36:19.756793 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:19 crc kubenswrapper[4848]: I0128 14:36:19.814638 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:20 crc kubenswrapper[4848]: I0128 14:36:20.014848 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:20 crc kubenswrapper[4848]: I0128 14:36:20.082624 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lnp5s"] Jan 28 14:36:21 crc kubenswrapper[4848]: I0128 14:36:21.976434 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lnp5s" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="registry-server" containerID="cri-o://844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4" gracePeriod=2 Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.525538 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.710307 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-catalog-content\") pod \"afd358f2-b4b7-4276-b233-77af442ebb29\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.710361 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-utilities\") pod \"afd358f2-b4b7-4276-b233-77af442ebb29\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.710638 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pxqq\" (UniqueName: \"kubernetes.io/projected/afd358f2-b4b7-4276-b233-77af442ebb29-kube-api-access-8pxqq\") pod \"afd358f2-b4b7-4276-b233-77af442ebb29\" (UID: \"afd358f2-b4b7-4276-b233-77af442ebb29\") " Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.711551 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-utilities" (OuterVolumeSpecName: "utilities") pod "afd358f2-b4b7-4276-b233-77af442ebb29" (UID: "afd358f2-b4b7-4276-b233-77af442ebb29"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.729568 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afd358f2-b4b7-4276-b233-77af442ebb29-kube-api-access-8pxqq" (OuterVolumeSpecName: "kube-api-access-8pxqq") pod "afd358f2-b4b7-4276-b233-77af442ebb29" (UID: "afd358f2-b4b7-4276-b233-77af442ebb29"). 
InnerVolumeSpecName "kube-api-access-8pxqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.749033 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "afd358f2-b4b7-4276-b233-77af442ebb29" (UID: "afd358f2-b4b7-4276-b233-77af442ebb29"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.813879 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.813927 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd358f2-b4b7-4276-b233-77af442ebb29-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.813945 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pxqq\" (UniqueName: \"kubernetes.io/projected/afd358f2-b4b7-4276-b233-77af442ebb29-kube-api-access-8pxqq\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.987812 4848 generic.go:334] "Generic (PLEG): container finished" podID="afd358f2-b4b7-4276-b233-77af442ebb29" containerID="844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4" exitCode=0 Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.987878 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerDied","Data":"844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4"} Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.987899 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lnp5s" Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.987920 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lnp5s" event={"ID":"afd358f2-b4b7-4276-b233-77af442ebb29","Type":"ContainerDied","Data":"5bb5ff37dcc7c7673de4b0c97a83e6ae5cd6d3ed82cb6735d968a864a6df2325"} Jan 28 14:36:22 crc kubenswrapper[4848]: I0128 14:36:22.987940 4848 scope.go:117] "RemoveContainer" containerID="844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.016375 4848 scope.go:117] "RemoveContainer" containerID="e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.022545 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lnp5s"] Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.040411 4848 scope.go:117] "RemoveContainer" containerID="171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.067425 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lnp5s"] Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.099537 4848 scope.go:117] "RemoveContainer" containerID="844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4" Jan 28 14:36:23 crc kubenswrapper[4848]: E0128 14:36:23.100098 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4\": container with ID starting with 844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4 not found: ID does not exist" containerID="844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.100137 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4"} err="failed to get container status \"844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4\": rpc error: code = NotFound desc = could not find container \"844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4\": container with ID starting with 844b73da605d85f9ea0da436b382d6703f39682d1c82bbe1c52414c2e0ac7db4 not found: ID does not exist" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.100162 4848 scope.go:117] "RemoveContainer" containerID="e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa" Jan 28 14:36:23 crc kubenswrapper[4848]: E0128 14:36:23.100574 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa\": container with ID starting with e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa not found: ID does not exist" containerID="e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.100599 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa"} err="failed to get container status \"e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa\": rpc error: code = NotFound desc = could not find 
container \"e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa\": container with ID starting with e19b69799d1a52c98485bd13096cbe6177e9df803118a109e37e96c471aaf6aa not found: ID does not exist" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.100621 4848 scope.go:117] "RemoveContainer" containerID="171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a" Jan 28 14:36:23 crc kubenswrapper[4848]: E0128 14:36:23.101071 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a\": container with ID starting with 171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a not found: ID does not exist" containerID="171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a" Jan 28 14:36:23 crc kubenswrapper[4848]: I0128 14:36:23.101123 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a"} err="failed to get container status \"171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a\": rpc error: code = NotFound desc = could not find container \"171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a\": container with ID starting with 171a9006266644d58e7f3a12335055c5cc6bf8b112eb3251ebf46d5da21f9f1a not found: ID does not exist" Jan 28 14:36:24 crc kubenswrapper[4848]: I0128 14:36:24.862713 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" path="/var/lib/kubelet/pods/afd358f2-b4b7-4276-b233-77af442ebb29/volumes" Jan 28 14:36:25 crc kubenswrapper[4848]: I0128 14:36:25.015036 4848 generic.go:334] "Generic (PLEG): container finished" podID="26f6ab6e-20ec-478a-be18-61bb5377a9dd" containerID="4bdb4c59ab77405f74948ff9dc800611074f077efaf75b148bcc170b95fa1570" exitCode=0 Jan 28 14:36:25 crc kubenswrapper[4848]: I0128 14:36:25.015092 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-frzrw" event={"ID":"26f6ab6e-20ec-478a-be18-61bb5377a9dd","Type":"ContainerDied","Data":"4bdb4c59ab77405f74948ff9dc800611074f077efaf75b148bcc170b95fa1570"} Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.154008 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.194831 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ffllp/crc-debug-frzrw"] Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.204335 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ffllp/crc-debug-frzrw"] Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.205409 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/26f6ab6e-20ec-478a-be18-61bb5377a9dd-host\") pod \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.205540 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/26f6ab6e-20ec-478a-be18-61bb5377a9dd-host" (OuterVolumeSpecName: "host") pod "26f6ab6e-20ec-478a-be18-61bb5377a9dd" (UID: "26f6ab6e-20ec-478a-be18-61bb5377a9dd"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.205617 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqlcf\" (UniqueName: \"kubernetes.io/projected/26f6ab6e-20ec-478a-be18-61bb5377a9dd-kube-api-access-nqlcf\") pod \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\" (UID: \"26f6ab6e-20ec-478a-be18-61bb5377a9dd\") " Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.206456 4848 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/26f6ab6e-20ec-478a-be18-61bb5377a9dd-host\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.214090 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26f6ab6e-20ec-478a-be18-61bb5377a9dd-kube-api-access-nqlcf" (OuterVolumeSpecName: "kube-api-access-nqlcf") pod "26f6ab6e-20ec-478a-be18-61bb5377a9dd" (UID: "26f6ab6e-20ec-478a-be18-61bb5377a9dd"). InnerVolumeSpecName "kube-api-access-nqlcf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.308492 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqlcf\" (UniqueName: \"kubernetes.io/projected/26f6ab6e-20ec-478a-be18-61bb5377a9dd-kube-api-access-nqlcf\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:26 crc kubenswrapper[4848]: I0128 14:36:26.881873 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26f6ab6e-20ec-478a-be18-61bb5377a9dd" path="/var/lib/kubelet/pods/26f6ab6e-20ec-478a-be18-61bb5377a9dd/volumes" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.040459 4848 scope.go:117] "RemoveContainer" containerID="4bdb4c59ab77405f74948ff9dc800611074f077efaf75b148bcc170b95fa1570" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.040493 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-frzrw" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.408234 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ffllp/crc-debug-rqrdv"] Jan 28 14:36:27 crc kubenswrapper[4848]: E0128 14:36:27.408907 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="registry-server" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.408920 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="registry-server" Jan 28 14:36:27 crc kubenswrapper[4848]: E0128 14:36:27.408933 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f6ab6e-20ec-478a-be18-61bb5377a9dd" containerName="container-00" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.408939 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f6ab6e-20ec-478a-be18-61bb5377a9dd" containerName="container-00" Jan 28 14:36:27 crc kubenswrapper[4848]: E0128 14:36:27.408955 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="extract-utilities" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.408963 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="extract-utilities" Jan 28 14:36:27 crc kubenswrapper[4848]: E0128 14:36:27.408981 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="extract-content" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.408987 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="extract-content" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.409178 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="afd358f2-b4b7-4276-b233-77af442ebb29" containerName="registry-server" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.409200 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="26f6ab6e-20ec-478a-be18-61bb5377a9dd" containerName="container-00" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.409912 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.432967 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-host\") pod \"crc-debug-rqrdv\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.433128 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f44k5\" (UniqueName: \"kubernetes.io/projected/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-kube-api-access-f44k5\") pod \"crc-debug-rqrdv\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.535104 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f44k5\" (UniqueName: \"kubernetes.io/projected/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-kube-api-access-f44k5\") pod \"crc-debug-rqrdv\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.535321 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-host\") pod \"crc-debug-rqrdv\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.535449 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-host\") pod \"crc-debug-rqrdv\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.567830 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f44k5\" (UniqueName: \"kubernetes.io/projected/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-kube-api-access-f44k5\") pod \"crc-debug-rqrdv\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:27 crc kubenswrapper[4848]: I0128 14:36:27.731594 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:28 crc kubenswrapper[4848]: I0128 14:36:28.072152 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" event={"ID":"f21cdcd1-8581-43cd-8a14-4472c43e6ea9","Type":"ContainerStarted","Data":"3bb555b620c7d5b650c05d6d286c4efcb2b197b3cc63ccfcb23b43e6cbc75c18"} Jan 28 14:36:28 crc kubenswrapper[4848]: I0128 14:36:28.072644 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" event={"ID":"f21cdcd1-8581-43cd-8a14-4472c43e6ea9","Type":"ContainerStarted","Data":"eabeeffe5bf6ce86ffd918831f03d604150652097b002743fc4930304ea60695"} Jan 28 14:36:28 crc kubenswrapper[4848]: I0128 14:36:28.097868 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" podStartSLOduration=1.097839972 podStartE2EDuration="1.097839972s" podCreationTimestamp="2026-01-28 14:36:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 14:36:28.088139467 +0000 UTC m=+6615.000356505" watchObservedRunningTime="2026-01-28 14:36:28.097839972 +0000 UTC m=+6615.010057010" Jan 28 14:36:29 crc kubenswrapper[4848]: I0128 14:36:29.094693 4848 generic.go:334] "Generic (PLEG): container finished" podID="f21cdcd1-8581-43cd-8a14-4472c43e6ea9" containerID="3bb555b620c7d5b650c05d6d286c4efcb2b197b3cc63ccfcb23b43e6cbc75c18" exitCode=0 Jan 28 14:36:29 crc kubenswrapper[4848]: I0128 14:36:29.094962 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" event={"ID":"f21cdcd1-8581-43cd-8a14-4472c43e6ea9","Type":"ContainerDied","Data":"3bb555b620c7d5b650c05d6d286c4efcb2b197b3cc63ccfcb23b43e6cbc75c18"} Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.235661 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.423060 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-host\") pod \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.423200 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f44k5\" (UniqueName: \"kubernetes.io/projected/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-kube-api-access-f44k5\") pod \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\" (UID: \"f21cdcd1-8581-43cd-8a14-4472c43e6ea9\") " Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.423435 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-host" (OuterVolumeSpecName: "host") pod "f21cdcd1-8581-43cd-8a14-4472c43e6ea9" (UID: "f21cdcd1-8581-43cd-8a14-4472c43e6ea9"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.423989 4848 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-host\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.430002 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-kube-api-access-f44k5" (OuterVolumeSpecName: "kube-api-access-f44k5") pod "f21cdcd1-8581-43cd-8a14-4472c43e6ea9" (UID: "f21cdcd1-8581-43cd-8a14-4472c43e6ea9"). InnerVolumeSpecName "kube-api-access-f44k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.526331 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f44k5\" (UniqueName: \"kubernetes.io/projected/f21cdcd1-8581-43cd-8a14-4472c43e6ea9-kube-api-access-f44k5\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.536264 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ffllp/crc-debug-rqrdv"] Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.555086 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ffllp/crc-debug-rqrdv"] Jan 28 14:36:30 crc kubenswrapper[4848]: I0128 14:36:30.866495 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f21cdcd1-8581-43cd-8a14-4472c43e6ea9" path="/var/lib/kubelet/pods/f21cdcd1-8581-43cd-8a14-4472c43e6ea9/volumes" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.121699 4848 scope.go:117] "RemoveContainer" containerID="3bb555b620c7d5b650c05d6d286c4efcb2b197b3cc63ccfcb23b43e6cbc75c18" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.121749 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-rqrdv" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.786111 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ffllp/crc-debug-4frcd"] Jan 28 14:36:31 crc kubenswrapper[4848]: E0128 14:36:31.786962 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f21cdcd1-8581-43cd-8a14-4472c43e6ea9" containerName="container-00" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.786979 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="f21cdcd1-8581-43cd-8a14-4472c43e6ea9" containerName="container-00" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.787292 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="f21cdcd1-8581-43cd-8a14-4472c43e6ea9" containerName="container-00" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.788236 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.864228 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2zf4\" (UniqueName: \"kubernetes.io/projected/576e59d9-671e-4d47-805c-932887d99d63-kube-api-access-b2zf4\") pod \"crc-debug-4frcd\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.865006 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/576e59d9-671e-4d47-805c-932887d99d63-host\") pod \"crc-debug-4frcd\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.967666 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2zf4\" (UniqueName: \"kubernetes.io/projected/576e59d9-671e-4d47-805c-932887d99d63-kube-api-access-b2zf4\") pod \"crc-debug-4frcd\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.968185 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/576e59d9-671e-4d47-805c-932887d99d63-host\") pod \"crc-debug-4frcd\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:31 crc kubenswrapper[4848]: I0128 14:36:31.968377 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/576e59d9-671e-4d47-805c-932887d99d63-host\") pod \"crc-debug-4frcd\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:32 crc kubenswrapper[4848]: I0128 14:36:32.004301 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2zf4\" (UniqueName: \"kubernetes.io/projected/576e59d9-671e-4d47-805c-932887d99d63-kube-api-access-b2zf4\") pod \"crc-debug-4frcd\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:32 crc kubenswrapper[4848]: I0128 14:36:32.122090 4848 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:32 crc kubenswrapper[4848]: W0128 14:36:32.164517 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod576e59d9_671e_4d47_805c_932887d99d63.slice/crio-8a6ecaef0b750900811ad5ca5c712db8d87bfce661d8ea8447739c4a2d198211 WatchSource:0}: Error finding container 8a6ecaef0b750900811ad5ca5c712db8d87bfce661d8ea8447739c4a2d198211: Status 404 returned error can't find the container with id 8a6ecaef0b750900811ad5ca5c712db8d87bfce661d8ea8447739c4a2d198211 Jan 28 14:36:33 crc kubenswrapper[4848]: I0128 14:36:33.146718 4848 generic.go:334] "Generic (PLEG): container finished" podID="576e59d9-671e-4d47-805c-932887d99d63" containerID="88dc4efa7e9b181e4e690937a975375a6affa15310ffa523c6e5358aeac82487" exitCode=0 Jan 28 14:36:33 crc kubenswrapper[4848]: I0128 14:36:33.146810 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-4frcd" event={"ID":"576e59d9-671e-4d47-805c-932887d99d63","Type":"ContainerDied","Data":"88dc4efa7e9b181e4e690937a975375a6affa15310ffa523c6e5358aeac82487"} Jan 28 14:36:33 crc kubenswrapper[4848]: I0128 14:36:33.147111 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/crc-debug-4frcd" event={"ID":"576e59d9-671e-4d47-805c-932887d99d63","Type":"ContainerStarted","Data":"8a6ecaef0b750900811ad5ca5c712db8d87bfce661d8ea8447739c4a2d198211"} Jan 28 14:36:33 crc kubenswrapper[4848]: I0128 14:36:33.193927 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ffllp/crc-debug-4frcd"] Jan 28 14:36:33 crc kubenswrapper[4848]: I0128 14:36:33.203291 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ffllp/crc-debug-4frcd"] Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.288322 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.428151 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/576e59d9-671e-4d47-805c-932887d99d63-host\") pod \"576e59d9-671e-4d47-805c-932887d99d63\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.428353 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2zf4\" (UniqueName: \"kubernetes.io/projected/576e59d9-671e-4d47-805c-932887d99d63-kube-api-access-b2zf4\") pod \"576e59d9-671e-4d47-805c-932887d99d63\" (UID: \"576e59d9-671e-4d47-805c-932887d99d63\") " Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.428455 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/576e59d9-671e-4d47-805c-932887d99d63-host" (OuterVolumeSpecName: "host") pod "576e59d9-671e-4d47-805c-932887d99d63" (UID: "576e59d9-671e-4d47-805c-932887d99d63"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.430671 4848 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/576e59d9-671e-4d47-805c-932887d99d63-host\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.452580 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/576e59d9-671e-4d47-805c-932887d99d63-kube-api-access-b2zf4" (OuterVolumeSpecName: "kube-api-access-b2zf4") pod "576e59d9-671e-4d47-805c-932887d99d63" (UID: "576e59d9-671e-4d47-805c-932887d99d63"). InnerVolumeSpecName "kube-api-access-b2zf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.532753 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2zf4\" (UniqueName: \"kubernetes.io/projected/576e59d9-671e-4d47-805c-932887d99d63-kube-api-access-b2zf4\") on node \"crc\" DevicePath \"\"" Jan 28 14:36:34 crc kubenswrapper[4848]: I0128 14:36:34.864268 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="576e59d9-671e-4d47-805c-932887d99d63" path="/var/lib/kubelet/pods/576e59d9-671e-4d47-805c-932887d99d63/volumes" Jan 28 14:36:35 crc kubenswrapper[4848]: I0128 14:36:35.169268 4848 scope.go:117] "RemoveContainer" containerID="88dc4efa7e9b181e4e690937a975375a6affa15310ffa523c6e5358aeac82487" Jan 28 14:36:35 crc kubenswrapper[4848]: I0128 14:36:35.169301 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/crc-debug-4frcd" Jan 28 14:37:29 crc kubenswrapper[4848]: I0128 14:37:29.562439 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-669cc887b-rnh7b_feba4e43-dc8d-455e-a760-82f68f781511/barbican-api/0.log" Jan 28 14:37:29 crc kubenswrapper[4848]: I0128 14:37:29.751667 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-669cc887b-rnh7b_feba4e43-dc8d-455e-a760-82f68f781511/barbican-api-log/0.log" Jan 28 14:37:29 crc kubenswrapper[4848]: I0128 14:37:29.801109 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5cb69d9f6b-f9ck5_7a72021f-6e14-4681-b127-7c85be7c597c/barbican-keystone-listener/0.log" Jan 28 14:37:29 crc kubenswrapper[4848]: I0128 14:37:29.927536 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5cb69d9f6b-f9ck5_7a72021f-6e14-4681-b127-7c85be7c597c/barbican-keystone-listener-log/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.018039 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bd96c8879-gdtwm_a56e7c4e-4ce2-4742-8645-6201f8c957f7/barbican-worker/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.091156 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bd96c8879-gdtwm_a56e7c4e-4ce2-4742-8645-6201f8c957f7/barbican-worker-log/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.301871 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-9xxd9_64b9b93d-fe00-440a-88b0-dbb5f4621be9/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.449189 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/ceilometer-central-agent/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.561081 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/ceilometer-notification-agent/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.562813 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/proxy-httpd/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.609691 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_d27898c6-95da-4dfc-908d-36876a776c2d/sg-core/0.log" Jan 28 14:37:30 crc kubenswrapper[4848]: I0128 14:37:30.916434 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0a69cc57-5cf8-4b44-a956-5641d66512fa/cinder-api-log/0.log" Jan 28 14:37:31 crc kubenswrapper[4848]: I0128 14:37:31.215086 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_b4edacab-a671-4ace-8bb5-bd113d2c666b/probe/0.log" Jan 28 14:37:31 crc kubenswrapper[4848]: I0128 14:37:31.476857 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_b4edacab-a671-4ace-8bb5-bd113d2c666b/cinder-backup/0.log" Jan 28 14:37:31 crc kubenswrapper[4848]: I0128 14:37:31.581093 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_04fa376f-7bc1-48da-870a-e8bb086f0263/probe/0.log" Jan 28 14:37:31 crc kubenswrapper[4848]: I0128 14:37:31.605217 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_04fa376f-7bc1-48da-870a-e8bb086f0263/cinder-scheduler/0.log" Jan 28 14:37:31 crc kubenswrapper[4848]: I0128 14:37:31.716498 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0a69cc57-5cf8-4b44-a956-5641d66512fa/cinder-api/0.log" Jan 28 14:37:31 crc kubenswrapper[4848]: I0128 14:37:31.935574 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_f434c780-9c6b-4fa2-b5a2-0220b134bb73/probe/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.148007 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_f434c780-9c6b-4fa2-b5a2-0220b134bb73/cinder-volume/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.217963 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_d004b545-6c1d-42f8-93cb-be2549026492/probe/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.248188 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_d004b545-6c1d-42f8-93cb-be2549026492/cinder-volume/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.429946 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-xtxrc_354c2496-37a2-4d9c-9439-42b042ca2639/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.531430 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-ghk2c_dee48f15-f76a-4039-b7a1-85c61a4d2ed3/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.663201 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-55b94cdbb7-56ttn_91425abd-325a-48c8-9c49-34b409614808/init/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.888895 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55b94cdbb7-56ttn_91425abd-325a-48c8-9c49-34b409614808/init/0.log" Jan 28 14:37:32 crc kubenswrapper[4848]: I0128 14:37:32.923464 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vtqf6_5e4858dc-29e9-4e1a-8629-73fa7b2f3b7b/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.077949 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55b94cdbb7-56ttn_91425abd-325a-48c8-9c49-34b409614808/dnsmasq-dns/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.185131 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0ab62279-8f3a-4ad3-8de4-84c72ad421a1/glance-httpd/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.221724 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0ab62279-8f3a-4ad3-8de4-84c72ad421a1/glance-log/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.380957 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_03e938c1-a61a-4c60-9d8e-660cefebc2fc/glance-httpd/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.430054 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_03e938c1-a61a-4c60-9d8e-660cefebc2fc/glance-log/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.644208 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68f5655b9d-76qsp_dfa56dc1-1635-454c-95e0-74fdedcf8b00/horizon/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.698377 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-2d6hr_4acf7592-041f-43a4-b85b-a2fac8dbdc3c/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.988035 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29493481-q89gd_7b8bd23c-f6a7-4d2d-9d6d-86b5eae94502/keystone-cron/0.log" Jan 28 14:37:33 crc kubenswrapper[4848]: I0128 14:37:33.989329 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-zjdcb_63a4d58d-3a42-4ddc-b735-af5e71c2ffd3/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:34 crc kubenswrapper[4848]: I0128 14:37:34.339619 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_791ef386-40ae-4395-aa5d-b86f13307c6c/kube-state-metrics/0.log" Jan 28 14:37:34 crc kubenswrapper[4848]: I0128 14:37:34.559193 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68f5655b9d-76qsp_dfa56dc1-1635-454c-95e0-74fdedcf8b00/horizon-log/0.log" Jan 28 14:37:34 crc kubenswrapper[4848]: I0128 14:37:34.802368 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-thzjf_d7d62ad6-1d49-4de9-b7c1-0494e7b12ef8/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:34 crc kubenswrapper[4848]: I0128 14:37:34.836756 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_keystone-d454d7fbb-hth9j_e364a091-9a40-455c-b2dc-fd9a5d51181a/keystone-api/0.log" Jan 28 14:37:35 crc kubenswrapper[4848]: I0128 14:37:35.292879 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-6l98p_d991fbd4-087c-475f-99cb-ccfab86bda67/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:35 crc kubenswrapper[4848]: I0128 14:37:35.364929 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6997cd7cdf-nf254_ac7966e3-99c4-4e7c-b2d6-7229c78ca5db/neutron-httpd/0.log" Jan 28 14:37:35 crc kubenswrapper[4848]: I0128 14:37:35.517160 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6997cd7cdf-nf254_ac7966e3-99c4-4e7c-b2d6-7229c78ca5db/neutron-api/0.log" Jan 28 14:37:36 crc kubenswrapper[4848]: I0128 14:37:36.297415 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_84390cac-21ce-4f4f-98f8-a8371c1742cb/nova-cell0-conductor-conductor/0.log" Jan 28 14:37:36 crc kubenswrapper[4848]: I0128 14:37:36.855482 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_df8d8482-966f-4f20-836e-09bef423d150/nova-cell1-conductor-conductor/0.log" Jan 28 14:37:37 crc kubenswrapper[4848]: I0128 14:37:37.090952 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_a524b9a4-fe08-4675-b873-030d31d75a28/nova-cell1-novncproxy-novncproxy/0.log" Jan 28 14:37:37 crc kubenswrapper[4848]: I0128 14:37:37.573603 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-rhf8n_15c3f5a8-ee6e-41b9-ae9a-ea04d817c86f/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:37 crc kubenswrapper[4848]: I0128 14:37:37.657306 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_68a7a2b4-9e0a-410e-b131-6bf39b7ffa35/nova-api-log/0.log" Jan 28 14:37:37 crc kubenswrapper[4848]: I0128 14:37:37.817633 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_6c953264-454e-4949-906c-25378e467ab4/nova-metadata-log/0.log" Jan 28 14:37:37 crc kubenswrapper[4848]: I0128 14:37:37.924280 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:37:37 crc kubenswrapper[4848]: I0128 14:37:37.924356 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:37:38 crc kubenswrapper[4848]: I0128 14:37:38.465969 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3face43f-5a30-4c86-b004-3a98bb508b55/mysql-bootstrap/0.log" Jan 28 14:37:38 crc kubenswrapper[4848]: I0128 14:37:38.490893 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_68a7a2b4-9e0a-410e-b131-6bf39b7ffa35/nova-api-api/0.log" Jan 28 14:37:38 crc kubenswrapper[4848]: I0128 14:37:38.543017 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-scheduler-0_725cd16a-296a-485a-9d15-df106a2c6ebc/nova-scheduler-scheduler/0.log" Jan 28 14:37:38 crc kubenswrapper[4848]: I0128 14:37:38.855111 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3face43f-5a30-4c86-b004-3a98bb508b55/mysql-bootstrap/0.log" Jan 28 14:37:38 crc kubenswrapper[4848]: I0128 14:37:38.855498 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3face43f-5a30-4c86-b004-3a98bb508b55/galera/0.log" Jan 28 14:37:39 crc kubenswrapper[4848]: I0128 14:37:39.062332 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ee209e0b-96f8-46ef-b1ff-2fac23c03ecc/mysql-bootstrap/0.log" Jan 28 14:37:39 crc kubenswrapper[4848]: I0128 14:37:39.311356 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ee209e0b-96f8-46ef-b1ff-2fac23c03ecc/mysql-bootstrap/0.log" Jan 28 14:37:39 crc kubenswrapper[4848]: I0128 14:37:39.397104 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ee209e0b-96f8-46ef-b1ff-2fac23c03ecc/galera/0.log" Jan 28 14:37:39 crc kubenswrapper[4848]: I0128 14:37:39.532039 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_841fc796-225e-424f-bd6c-d3d43c9814d4/openstackclient/0.log" Jan 28 14:37:39 crc kubenswrapper[4848]: I0128 14:37:39.595332 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wk5zd_55d9487c-8ef4-4859-b3ca-6bd679cb1854/openstack-network-exporter/0.log" Jan 28 14:37:39 crc kubenswrapper[4848]: I0128 14:37:39.826793 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovsdb-server-init/0.log" Jan 28 14:37:40 crc kubenswrapper[4848]: I0128 14:37:40.118614 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovsdb-server-init/0.log" Jan 28 14:37:40 crc kubenswrapper[4848]: I0128 14:37:40.193372 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovsdb-server/0.log" Jan 28 14:37:40 crc kubenswrapper[4848]: I0128 14:37:40.497765 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-p6z9h_77e3e961-2cae-4bee-b73a-40336940b35c/ovn-controller/0.log" Jan 28 14:37:40 crc kubenswrapper[4848]: I0128 14:37:40.592536 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-59mkx_49e7ea17-ef5b-4403-ad09-3553928c90e3/ovs-vswitchd/0.log" Jan 28 14:37:40 crc kubenswrapper[4848]: I0128 14:37:40.893742 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-jv56f_ebc674d6-8c77-4481-b022-c91d7c77ec6e/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:40 crc kubenswrapper[4848]: I0128 14:37:40.974936 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_31b7f744-13ea-445d-99a0-57155c52e332/openstack-network-exporter/0.log" Jan 28 14:37:41 crc kubenswrapper[4848]: I0128 14:37:41.125166 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_31b7f744-13ea-445d-99a0-57155c52e332/ovn-northd/0.log" Jan 28 14:37:41 crc kubenswrapper[4848]: I0128 14:37:41.222544 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-metadata-0_6c953264-454e-4949-906c-25378e467ab4/nova-metadata-metadata/0.log" Jan 28 14:37:41 crc kubenswrapper[4848]: I0128 14:37:41.270908 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_38cd06a1-9204-4a3f-bb28-9227a8023af9/openstack-network-exporter/0.log" Jan 28 14:37:41 crc kubenswrapper[4848]: I0128 14:37:41.409411 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_38cd06a1-9204-4a3f-bb28-9227a8023af9/ovsdbserver-nb/0.log" Jan 28 14:37:41 crc kubenswrapper[4848]: I0128 14:37:41.504995 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_59144d8e-c7a9-442f-bcc3-585322a77a97/openstack-network-exporter/0.log" Jan 28 14:37:41 crc kubenswrapper[4848]: I0128 14:37:41.524177 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_59144d8e-c7a9-442f-bcc3-585322a77a97/ovsdbserver-sb/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.043868 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/init-config-reloader/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.052369 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-648cdddfd-q5sbd_8e4ac2f3-a03f-4338-8cd3-188dc4829ea9/placement-api/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.231835 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-648cdddfd-q5sbd_8e4ac2f3-a03f-4338-8cd3-188dc4829ea9/placement-log/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.271046 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/init-config-reloader/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.289077 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/prometheus/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.289593 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/config-reloader/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.486578 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_7bb0f426-1fdb-427a-ad1e-dc5387a1ba01/thanos-sidecar/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.574572 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2255ce73-5019-4b86-b15b-1e390099af55/setup-container/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.768335 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2255ce73-5019-4b86-b15b-1e390099af55/rabbitmq/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.800538 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2255ce73-5019-4b86-b15b-1e390099af55/setup-container/0.log" Jan 28 14:37:42 crc kubenswrapper[4848]: I0128 14:37:42.937722 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_ff062566-cfd3-4393-b794-695d3473ef1a/setup-container/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.132486 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-notifications-server-0_ff062566-cfd3-4393-b794-695d3473ef1a/setup-container/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.195202 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36728af2-3caa-4d67-bec1-ed4b2d26547c/setup-container/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.235026 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_ff062566-cfd3-4393-b794-695d3473ef1a/rabbitmq/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.565783 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36728af2-3caa-4d67-bec1-ed4b2d26547c/rabbitmq/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.566378 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36728af2-3caa-4d67-bec1-ed4b2d26547c/setup-container/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.628760 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-blwfm_0e12cbe9-44fc-4a05-8bb1-7b5ccbd61898/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.900172 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-w9xdz_c1cb683f-398f-4145-aa62-96ecbb02e82d/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:43 crc kubenswrapper[4848]: I0128 14:37:43.936443 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-pbcf9_0c2e6d21-25c3-4653-bd87-18f42e3a68a5/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.196184 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-ntmv9_e8f81366-a592-4a64-b4e7-7d036d232b6b/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.230616 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-rzhsb_548fac9b-bd05-42b8-8c88-7c9de08ae4b2/ssh-known-hosts-edpm-deployment/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.598811 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77cbfc9c5c-vjds6_8c80f3cf-4e08-4748-95eb-400461e61399/proxy-server/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.736996 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-mgbt4_e7ead1bd-9cbb-4b0c-adb8-bf91b66fae1d/swift-ring-rebalance/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.844499 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-auditor/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.907843 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77cbfc9c5c-vjds6_8c80f3cf-4e08-4748-95eb-400461e61399/proxy-httpd/0.log" Jan 28 14:37:44 crc kubenswrapper[4848]: I0128 14:37:44.990927 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-reaper/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.136401 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-server/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.182872 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-auditor/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.195037 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/account-replicator/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.297604 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-replicator/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.400005 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-server/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.445198 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/container-updater/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.527328 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-auditor/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.530762 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-expirer/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.702883 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-replicator/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.744784 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-server/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.784888 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/object-updater/0.log" Jan 28 14:37:45 crc kubenswrapper[4848]: I0128 14:37:45.919317 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/rsync/0.log" Jan 28 14:37:46 crc kubenswrapper[4848]: I0128 14:37:46.003351 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_af520475-92ee-41e6-90e1-7ad3d9609d51/swift-recon-cron/0.log" Jan 28 14:37:46 crc kubenswrapper[4848]: I0128 14:37:46.144735 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-hkctn_42d08409-a571-40ac-968e-7ac9a5280841/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:46 crc kubenswrapper[4848]: I0128 14:37:46.338126 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_08f6c3e6-eb26-471d-947f-11cb5533c6c8/tempest-tests-tempest-tests-runner/0.log" Jan 28 14:37:46 crc kubenswrapper[4848]: I0128 14:37:46.432930 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_f8682e28-9944-4b82-b3d0-f6e6eca96b93/test-operator-logs-container/0.log" Jan 28 14:37:46 crc kubenswrapper[4848]: I0128 14:37:46.605878 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-67bz6_c4b08279-fe00-4688-8202-88df5280da09/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 14:37:47 crc kubenswrapper[4848]: I0128 14:37:47.593599 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_b7811364-7959-428c-8be5-751c4b25f597/watcher-applier/0.log" Jan 28 14:37:48 crc kubenswrapper[4848]: I0128 14:37:48.532857 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_2c33d357-d7c0-4239-a58e-d882b915fafb/watcher-api-log/0.log" Jan 28 14:37:51 crc kubenswrapper[4848]: I0128 14:37:51.960792 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_c4b63577-cac1-4fce-bdca-c0b5a5d6c646/watcher-decision-engine/0.log" Jan 28 14:37:53 crc kubenswrapper[4848]: I0128 14:37:53.895313 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_2c33d357-d7c0-4239-a58e-d882b915fafb/watcher-api/0.log" Jan 28 14:37:55 crc kubenswrapper[4848]: I0128 14:37:55.164980 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_7e5a41be-973a-4b25-991f-ccbdef21b343/memcached/0.log" Jan 28 14:38:07 crc kubenswrapper[4848]: I0128 14:38:07.925080 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:38:07 crc kubenswrapper[4848]: I0128 14:38:07.925712 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:38:19 crc kubenswrapper[4848]: I0128 14:38:19.919098 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/util/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.112426 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/util/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.160472 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/pull/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.218560 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/pull/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.323216 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/util/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.367654 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/pull/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.374113 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_06e5dcf9f7c324a68087772c38783342817c4689bf97c580d5b11b1a7ajwcvq_eca5d9bf-13bb-40da-b40a-d9d656a0fcff/extract/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.661154 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7478f7dbf9-cjt92_4747f67c-5dd8-415a-8ff5-c6b43e1142cf/manager/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.677336 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7f86f8796f-dj8qm_f41ee80c-1ab9-4786-8fec-d7b3a12d545b/manager/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.763175 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-9nvdh_b29a79e7-07da-4c52-9798-e279092c28df/manager/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.930224 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-tx7mn_d20ac3bf-9cba-4074-962c-7ad7d7b17174/manager/0.log" Jan 28 14:38:20 crc kubenswrapper[4848]: I0128 14:38:20.987591 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-9jqlp_92cbecbc-09b7-4aa7-8511-dcc241d6b957/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.154350 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-g54sg_e535d212-7524-4da1-9905-87af2259c702/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.404132 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-598f7747c9-hfnz7_0a7152e1-cedd-465b-a186-9a241ca98141/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.498804 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-694cf4f878-gcj9g_fe2e05c6-72db-4981-8b56-dc2a620003f2/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.709392 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-v6mn8_dedfeb84-9e8b-46f8-ac8f-0c5a85380160/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.725144 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-xb97k_82ac0cb8-c28c-4242-8aa5-817aaf35ea3e/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.843261 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6b9fb5fdcb-d2h9f_39a4178e-2251-4cc9-bc57-2b46a5902a3d/manager/0.log" Jan 28 14:38:21 crc kubenswrapper[4848]: I0128 14:38:21.938728 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78d58447c5-qpthc_365e9359-c6e2-428c-8889-95a232bb3e34/manager/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.121953 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_nova-operator-controller-manager-7bdb645866-jwvlh_34fd263e-f69d-4cc8-a003-ccb6f12273a6/manager/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.167038 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-5f4cd88d46-mckcj_8f0ab1f6-45a7-4731-b418-f9131c97217a/manager/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.298423 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854mb5g6_390dea01-5c38-4c87-98c2-32f655af4a62/manager/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.417158 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7db44d5f8c-t26mq_221cef79-cbf0-4a42-baca-872879406257/operator/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.680305 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6jnp2_730f88b0-924e-4c06-868f-4baf83bc17a9/registry-server/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.911600 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6f75f45d54-csw7g_2c9667bf-ec8d-4064-b52e-e5a0f55f09a3/manager/0.log" Jan 28 14:38:22 crc kubenswrapper[4848]: I0128 14:38:22.948487 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-79d5ccc684-2g2qj_ee8c2e3c-2df5-43aa-b624-e82e4cff81fb/manager/0.log" Jan 28 14:38:23 crc kubenswrapper[4848]: I0128 14:38:23.140085 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-8mp86_04f3b1d4-2f58-42d7-962c-d7a940b93469/operator/0.log" Jan 28 14:38:23 crc kubenswrapper[4848]: I0128 14:38:23.429875 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-cxnsf_1dada58b-0b20-4d23-aa46-164beef54624/manager/0.log" Jan 28 14:38:23 crc kubenswrapper[4848]: I0128 14:38:23.682539 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-5h8th_2801f0da-025c-46a4-a123-6e71c300b025/manager/0.log" Jan 28 14:38:23 crc kubenswrapper[4848]: I0128 14:38:23.766021 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-s8mg8_164ef38a-92cd-4442-8925-509ba68366ba/manager/0.log" Jan 28 14:38:23 crc kubenswrapper[4848]: I0128 14:38:23.787143 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6b67879f4f-c5rbp_ef39eedb-8ccb-47f4-af2c-faee2565e2c9/manager/0.log" Jan 28 14:38:23 crc kubenswrapper[4848]: I0128 14:38:23.932828 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-59c5775db7-r2ppl_0593b76f-9225-457e-9c0f-186dc73f37a3/manager/0.log" Jan 28 14:38:37 crc kubenswrapper[4848]: I0128 14:38:37.924457 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:38:37 crc kubenswrapper[4848]: 
I0128 14:38:37.925089 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:38:37 crc kubenswrapper[4848]: I0128 14:38:37.925155 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:38:37 crc kubenswrapper[4848]: I0128 14:38:37.926191 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:38:37 crc kubenswrapper[4848]: I0128 14:38:37.926283 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" gracePeriod=600 Jan 28 14:38:38 crc kubenswrapper[4848]: E0128 14:38:38.060812 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:38:38 crc kubenswrapper[4848]: I0128 14:38:38.611089 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" exitCode=0 Jan 28 14:38:38 crc kubenswrapper[4848]: I0128 14:38:38.611167 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"} Jan 28 14:38:38 crc kubenswrapper[4848]: I0128 14:38:38.611257 4848 scope.go:117] "RemoveContainer" containerID="490c280e1157715c8bda663dcfbeb8881688ac0226ce28dcc3e38c2ea7cab704" Jan 28 14:38:38 crc kubenswrapper[4848]: I0128 14:38:38.612026 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:38:38 crc kubenswrapper[4848]: E0128 14:38:38.612484 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:38:47 crc kubenswrapper[4848]: I0128 14:38:47.312093 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-p4g9c_3daae941-7347-4673-8fef-20c2785a8cd6/control-plane-machine-set-operator/0.log" Jan 28 14:38:47 crc kubenswrapper[4848]: I0128 14:38:47.534743 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-zdq5h_4fa929eb-e746-4253-9cf6-dcb0939da532/kube-rbac-proxy/0.log" Jan 28 14:38:47 crc kubenswrapper[4848]: I0128 14:38:47.572324 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-zdq5h_4fa929eb-e746-4253-9cf6-dcb0939da532/machine-api-operator/0.log" Jan 28 14:38:49 crc kubenswrapper[4848]: I0128 14:38:49.851040 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:38:49 crc kubenswrapper[4848]: E0128 14:38:49.851613 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:39:03 crc kubenswrapper[4848]: I0128 14:39:03.177057 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-c7q52_59ff8003-99d4-4d16-bb2f-6b5ff9ae8ac8/cert-manager-controller/0.log" Jan 28 14:39:03 crc kubenswrapper[4848]: I0128 14:39:03.313305 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-cvf98_09794657-9406-4696-9df8-0f0d782604de/cert-manager-cainjector/0.log" Jan 28 14:39:03 crc kubenswrapper[4848]: I0128 14:39:03.399992 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-dwrr2_4661c13f-0355-4d7e-b7d9-5a3446bfcc17/cert-manager-webhook/0.log" Jan 28 14:39:03 crc kubenswrapper[4848]: I0128 14:39:03.850652 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:39:03 crc kubenswrapper[4848]: E0128 14:39:03.850972 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:39:17 crc kubenswrapper[4848]: I0128 14:39:17.851486 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:39:17 crc kubenswrapper[4848]: E0128 14:39:17.852464 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:39:18 crc kubenswrapper[4848]: I0128 14:39:18.455636 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-76xvb_ae62f49e-2ce4-4e48-b803-2b46a5319273/nmstate-console-plugin/0.log" Jan 28 14:39:18 crc kubenswrapper[4848]: I0128 14:39:18.557995 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-w28lf_88ba0124-029f-4b9c-8479-2ee4c089bcbb/nmstate-handler/0.log" Jan 28 14:39:18 crc kubenswrapper[4848]: I0128 14:39:18.661153 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-7gp2j_b67bb5ba-7747-475f-a3c5-de2b7df72934/kube-rbac-proxy/0.log" Jan 28 14:39:18 crc kubenswrapper[4848]: I0128 14:39:18.743239 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-7gp2j_b67bb5ba-7747-475f-a3c5-de2b7df72934/nmstate-metrics/0.log" Jan 28 14:39:18 crc kubenswrapper[4848]: I0128 14:39:18.887175 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-rbzxj_829bde53-8549-411f-a1ff-a00769198b1c/nmstate-operator/0.log" Jan 28 14:39:18 crc kubenswrapper[4848]: I0128 14:39:18.956938 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-52stv_38d465b1-a9c1-4007-8406-9fd77ec0ead4/nmstate-webhook/0.log" Jan 28 14:39:30 crc kubenswrapper[4848]: I0128 14:39:30.851603 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:39:30 crc kubenswrapper[4848]: E0128 14:39:30.852871 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:39:33 crc kubenswrapper[4848]: I0128 14:39:33.964632 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-pwsdh_021caff7-8415-451a-941e-20d025a0aa2b/prometheus-operator/0.log" Jan 28 14:39:34 crc kubenswrapper[4848]: I0128 14:39:34.166774 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8_40955df6-8a58-487d-98fb-f8632536c72a/prometheus-operator-admission-webhook/0.log" Jan 28 14:39:34 crc kubenswrapper[4848]: I0128 14:39:34.168628 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn_25424d22-6211-41f8-9482-de5ca224224c/prometheus-operator-admission-webhook/0.log" Jan 28 14:39:34 crc kubenswrapper[4848]: I0128 14:39:34.417406 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-hs6jb_ec6c23a2-9920-4672-92c6-c44569e918d4/operator/0.log" Jan 28 14:39:34 crc kubenswrapper[4848]: I0128 14:39:34.468177 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-lh2xv_ff57a0c9-f0c9-4ba1-9166-37cb03178711/perses-operator/0.log" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.614858 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gbqrv"] Jan 28 14:39:35 crc kubenswrapper[4848]: E0128 14:39:35.615460 4848 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="576e59d9-671e-4d47-805c-932887d99d63" containerName="container-00" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.615480 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="576e59d9-671e-4d47-805c-932887d99d63" containerName="container-00" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.615707 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="576e59d9-671e-4d47-805c-932887d99d63" containerName="container-00" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.617348 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.640664 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gbqrv"] Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.675089 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bgpd\" (UniqueName: \"kubernetes.io/projected/38258be6-c051-4b00-82e3-60bebdd600ce-kube-api-access-9bgpd\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.675643 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-utilities\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.676221 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-catalog-content\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.778971 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-catalog-content\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.779081 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bgpd\" (UniqueName: \"kubernetes.io/projected/38258be6-c051-4b00-82e3-60bebdd600ce-kube-api-access-9bgpd\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.779232 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-utilities\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.779683 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-catalog-content\") pod 
\"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.779796 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-utilities\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.803904 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bgpd\" (UniqueName: \"kubernetes.io/projected/38258be6-c051-4b00-82e3-60bebdd600ce-kube-api-access-9bgpd\") pod \"redhat-operators-gbqrv\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:35 crc kubenswrapper[4848]: I0128 14:39:35.947627 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:36 crc kubenswrapper[4848]: I0128 14:39:36.614695 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gbqrv"] Jan 28 14:39:36 crc kubenswrapper[4848]: W0128 14:39:36.627047 4848 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38258be6_c051_4b00_82e3_60bebdd600ce.slice/crio-d51af33e15ebdb2c498f9316fb170a33c2ca5d2920e005e7645b88b04f2b62fc WatchSource:0}: Error finding container d51af33e15ebdb2c498f9316fb170a33c2ca5d2920e005e7645b88b04f2b62fc: Status 404 returned error can't find the container with id d51af33e15ebdb2c498f9316fb170a33c2ca5d2920e005e7645b88b04f2b62fc Jan 28 14:39:37 crc kubenswrapper[4848]: I0128 14:39:37.313481 4848 generic.go:334] "Generic (PLEG): container finished" podID="38258be6-c051-4b00-82e3-60bebdd600ce" containerID="17ba3eef18bd089c4bdca751958f2b26f5ceb2d7333e26b883f2a323652065fe" exitCode=0 Jan 28 14:39:37 crc kubenswrapper[4848]: I0128 14:39:37.313548 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerDied","Data":"17ba3eef18bd089c4bdca751958f2b26f5ceb2d7333e26b883f2a323652065fe"} Jan 28 14:39:37 crc kubenswrapper[4848]: I0128 14:39:37.313631 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerStarted","Data":"d51af33e15ebdb2c498f9316fb170a33c2ca5d2920e005e7645b88b04f2b62fc"} Jan 28 14:39:38 crc kubenswrapper[4848]: I0128 14:39:38.325066 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerStarted","Data":"a934f589e0842e9fc3e12be04a8c4412f1983c10b4f94a94829f215ecf3691ea"} Jan 28 14:39:40 crc kubenswrapper[4848]: I0128 14:39:40.361685 4848 generic.go:334] "Generic (PLEG): container finished" podID="38258be6-c051-4b00-82e3-60bebdd600ce" containerID="a934f589e0842e9fc3e12be04a8c4412f1983c10b4f94a94829f215ecf3691ea" exitCode=0 Jan 28 14:39:40 crc kubenswrapper[4848]: I0128 14:39:40.361809 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" 
event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerDied","Data":"a934f589e0842e9fc3e12be04a8c4412f1983c10b4f94a94829f215ecf3691ea"} Jan 28 14:39:43 crc kubenswrapper[4848]: I0128 14:39:43.850686 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:39:43 crc kubenswrapper[4848]: E0128 14:39:43.851576 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:39:45 crc kubenswrapper[4848]: I0128 14:39:45.441415 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerStarted","Data":"207783ea1b568ee330c1a634ba511c168e48a751f86ea7510c4bf44e7e975c1e"} Jan 28 14:39:45 crc kubenswrapper[4848]: I0128 14:39:45.463901 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gbqrv" podStartSLOduration=3.002479462 podStartE2EDuration="10.463877589s" podCreationTimestamp="2026-01-28 14:39:35 +0000 UTC" firstStartedPulling="2026-01-28 14:39:37.317304095 +0000 UTC m=+6804.229521133" lastFinishedPulling="2026-01-28 14:39:44.778702222 +0000 UTC m=+6811.690919260" observedRunningTime="2026-01-28 14:39:45.459527989 +0000 UTC m=+6812.371745027" watchObservedRunningTime="2026-01-28 14:39:45.463877589 +0000 UTC m=+6812.376094627" Jan 28 14:39:45 crc kubenswrapper[4848]: I0128 14:39:45.948443 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:45 crc kubenswrapper[4848]: I0128 14:39:45.948605 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:46 crc kubenswrapper[4848]: I0128 14:39:46.996320 4848 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gbqrv" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="registry-server" probeResult="failure" output=< Jan 28 14:39:46 crc kubenswrapper[4848]: timeout: failed to connect service ":50051" within 1s Jan 28 14:39:46 crc kubenswrapper[4848]: > Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.092065 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-tz8dm_ce4dce22-bb0b-4fc3-b724-edbfe04cea8b/kube-rbac-proxy/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.133441 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-tz8dm_ce4dce22-bb0b-4fc3-b724-edbfe04cea8b/controller/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.368640 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.590198 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.599132 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.614488 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.629500 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.807095 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.825408 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.855078 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:39:52 crc kubenswrapper[4848]: I0128 14:39:52.867719 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.045560 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-metrics/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.075835 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-reloader/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.081754 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/cp-frr-files/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.111217 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/controller/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.294548 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/frr-metrics/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.336626 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/kube-rbac-proxy/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.354763 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/kube-rbac-proxy-frr/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.552327 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/reloader/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.606747 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-kdftv_9744680c-1423-4e9a-a285-bca5722378d9/frr-k8s-webhook-server/0.log" Jan 28 14:39:53 crc kubenswrapper[4848]: I0128 14:39:53.901021 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-767fd6bd7f-8fzzq_1f6cf095-4c6d-4e45-9b7f-ce507b1cc72b/manager/0.log" Jan 28 14:39:54 crc kubenswrapper[4848]: I0128 14:39:54.060138 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7d6997b498-j9mdf_a8573d7d-c62b-45f5-9f5c-90a45126a2f4/webhook-server/0.log" Jan 28 14:39:54 crc kubenswrapper[4848]: I0128 14:39:54.258340 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sfdg2_4645d31f-e3e8-4c7a-ace2-c82b88fd7488/kube-rbac-proxy/0.log" Jan 28 14:39:55 crc kubenswrapper[4848]: I0128 14:39:55.699146 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sfdg2_4645d31f-e3e8-4c7a-ace2-c82b88fd7488/speaker/0.log" Jan 28 14:39:55 crc kubenswrapper[4848]: I0128 14:39:55.841366 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rrm9n_9bb36bc6-537d-4853-9367-d38c728c6cc7/frr/0.log" Jan 28 14:39:56 crc kubenswrapper[4848]: I0128 14:39:56.011628 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:56 crc kubenswrapper[4848]: I0128 14:39:56.076072 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:56 crc kubenswrapper[4848]: I0128 14:39:56.259813 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gbqrv"] Jan 28 14:39:57 crc kubenswrapper[4848]: I0128 14:39:57.594659 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gbqrv" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="registry-server" containerID="cri-o://207783ea1b568ee330c1a634ba511c168e48a751f86ea7510c4bf44e7e975c1e" gracePeriod=2 Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.609796 4848 generic.go:334] "Generic (PLEG): container finished" podID="38258be6-c051-4b00-82e3-60bebdd600ce" containerID="207783ea1b568ee330c1a634ba511c168e48a751f86ea7510c4bf44e7e975c1e" exitCode=0 Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.609861 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerDied","Data":"207783ea1b568ee330c1a634ba511c168e48a751f86ea7510c4bf44e7e975c1e"} Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.736784 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.850478 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:39:58 crc kubenswrapper[4848]: E0128 14:39:58.850847 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.907078 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-catalog-content\") pod \"38258be6-c051-4b00-82e3-60bebdd600ce\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.907399 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bgpd\" (UniqueName: \"kubernetes.io/projected/38258be6-c051-4b00-82e3-60bebdd600ce-kube-api-access-9bgpd\") pod \"38258be6-c051-4b00-82e3-60bebdd600ce\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.907585 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-utilities\") pod \"38258be6-c051-4b00-82e3-60bebdd600ce\" (UID: \"38258be6-c051-4b00-82e3-60bebdd600ce\") " Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.908435 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-utilities" (OuterVolumeSpecName: "utilities") pod "38258be6-c051-4b00-82e3-60bebdd600ce" (UID: "38258be6-c051-4b00-82e3-60bebdd600ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.908863 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:39:58 crc kubenswrapper[4848]: I0128 14:39:58.919307 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38258be6-c051-4b00-82e3-60bebdd600ce-kube-api-access-9bgpd" (OuterVolumeSpecName: "kube-api-access-9bgpd") pod "38258be6-c051-4b00-82e3-60bebdd600ce" (UID: "38258be6-c051-4b00-82e3-60bebdd600ce"). InnerVolumeSpecName "kube-api-access-9bgpd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.012420 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bgpd\" (UniqueName: \"kubernetes.io/projected/38258be6-c051-4b00-82e3-60bebdd600ce-kube-api-access-9bgpd\") on node \"crc\" DevicePath \"\"" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.026878 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "38258be6-c051-4b00-82e3-60bebdd600ce" (UID: "38258be6-c051-4b00-82e3-60bebdd600ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.114142 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38258be6-c051-4b00-82e3-60bebdd600ce-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.622953 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbqrv" event={"ID":"38258be6-c051-4b00-82e3-60bebdd600ce","Type":"ContainerDied","Data":"d51af33e15ebdb2c498f9316fb170a33c2ca5d2920e005e7645b88b04f2b62fc"} Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.623048 4848 scope.go:117] "RemoveContainer" containerID="207783ea1b568ee330c1a634ba511c168e48a751f86ea7510c4bf44e7e975c1e" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.623313 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gbqrv" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.670518 4848 scope.go:117] "RemoveContainer" containerID="a934f589e0842e9fc3e12be04a8c4412f1983c10b4f94a94829f215ecf3691ea" Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.672409 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gbqrv"] Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.693051 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gbqrv"] Jan 28 14:39:59 crc kubenswrapper[4848]: I0128 14:39:59.734857 4848 scope.go:117] "RemoveContainer" containerID="17ba3eef18bd089c4bdca751958f2b26f5ceb2d7333e26b883f2a323652065fe" Jan 28 14:40:00 crc kubenswrapper[4848]: I0128 14:40:00.862222 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" path="/var/lib/kubelet/pods/38258be6-c051-4b00-82e3-60bebdd600ce/volumes" Jan 28 14:40:09 crc kubenswrapper[4848]: I0128 14:40:09.911939 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/util/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.169684 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/util/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.193001 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/pull/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.246527 4848 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/pull/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.465854 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/pull/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.508448 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/extract/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.556446 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrpq5d_8a8f5de6-d418-43d7-855b-c4773b3dc691/util/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.706813 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/util/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.850567 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:40:10 crc kubenswrapper[4848]: E0128 14:40:10.850906 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.875410 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/util/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.908777 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/pull/0.log" Jan 28 14:40:10 crc kubenswrapper[4848]: I0128 14:40:10.940947 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/pull/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.198028 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/util/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.198803 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/pull/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.248064 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138fql6_50d9a50c-8ae1-4157-8c81-c32ba250030b/extract/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.406500 
4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/util/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.599731 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/util/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.617044 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/pull/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.617170 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/pull/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.848726 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/util/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.888968 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/pull/0.log" Jan 28 14:40:11 crc kubenswrapper[4848]: I0128 14:40:11.909122 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f087d4bj_d21dbbe3-7e51-4175-8602-91d4f3d3d8b7/extract/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.099980 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-utilities/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.259320 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-utilities/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.523652 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-content/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.532702 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-content/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.740330 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-utilities/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.800474 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/extract-content/0.log" Jan 28 14:40:12 crc kubenswrapper[4848]: I0128 14:40:12.982208 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-utilities/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.212912 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-xmj5z_0faec2cf-cb9a-4c84-b020-2782d1927242/registry-server/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.245659 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-utilities/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.247465 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-content/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.280664 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-content/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.525863 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-content/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.636974 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/extract-utilities/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.766075 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-g5r8p_69959509-efcd-4928-98ad-1dcd656b5513/marketplace-operator/0.log" Jan 28 14:40:13 crc kubenswrapper[4848]: I0128 14:40:13.874196 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-utilities/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.178228 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-content/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.232158 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-utilities/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.290991 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-content/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.488122 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-content/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.500506 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/extract-utilities/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.792426 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-utilities/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.897195 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-fznvv_0a959de4-373b-4ee5-a5ef-425d06ccea02/registry-server/0.log" Jan 28 14:40:14 crc kubenswrapper[4848]: I0128 14:40:14.994208 4848 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-9pjsb_1816581b-af94-4067-9cd0-23c9e204bd4c/registry-server/0.log" Jan 28 14:40:15 crc kubenswrapper[4848]: I0128 14:40:15.035165 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-utilities/0.log" Jan 28 14:40:15 crc kubenswrapper[4848]: I0128 14:40:15.042664 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-content/0.log" Jan 28 14:40:15 crc kubenswrapper[4848]: I0128 14:40:15.063198 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-content/0.log" Jan 28 14:40:15 crc kubenswrapper[4848]: I0128 14:40:15.266707 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-utilities/0.log" Jan 28 14:40:15 crc kubenswrapper[4848]: I0128 14:40:15.277664 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/extract-content/0.log" Jan 28 14:40:15 crc kubenswrapper[4848]: I0128 14:40:15.868412 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hjl86_1b5720cb-d35a-4b2b-8462-e18da80b34d0/registry-server/0.log" Jan 28 14:40:24 crc kubenswrapper[4848]: I0128 14:40:24.862463 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:40:24 crc kubenswrapper[4848]: E0128 14:40:24.863710 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:40:31 crc kubenswrapper[4848]: I0128 14:40:31.619067 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-w2gh8_40955df6-8a58-487d-98fb-f8632536c72a/prometheus-operator-admission-webhook/0.log" Jan 28 14:40:31 crc kubenswrapper[4848]: I0128 14:40:31.682593 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f64c68886-hpkcn_25424d22-6211-41f8-9482-de5ca224224c/prometheus-operator-admission-webhook/0.log" Jan 28 14:40:31 crc kubenswrapper[4848]: I0128 14:40:31.706634 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-pwsdh_021caff7-8415-451a-941e-20d025a0aa2b/prometheus-operator/0.log" Jan 28 14:40:31 crc kubenswrapper[4848]: I0128 14:40:31.891751 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-hs6jb_ec6c23a2-9920-4672-92c6-c44569e918d4/operator/0.log" Jan 28 14:40:31 crc kubenswrapper[4848]: I0128 14:40:31.911840 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-lh2xv_ff57a0c9-f0c9-4ba1-9166-37cb03178711/perses-operator/0.log" Jan 28 14:40:37 crc kubenswrapper[4848]: I0128 
14:40:37.850751 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:40:37 crc kubenswrapper[4848]: E0128 14:40:37.852093 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:40:50 crc kubenswrapper[4848]: I0128 14:40:50.850805 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:40:50 crc kubenswrapper[4848]: E0128 14:40:50.851897 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:41:01 crc kubenswrapper[4848]: I0128 14:41:01.850320 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:41:01 crc kubenswrapper[4848]: E0128 14:41:01.851199 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:41:13 crc kubenswrapper[4848]: I0128 14:41:13.972889 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7f47z"] Jan 28 14:41:13 crc kubenswrapper[4848]: E0128 14:41:13.973872 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="extract-content" Jan 28 14:41:13 crc kubenswrapper[4848]: I0128 14:41:13.973888 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="extract-content" Jan 28 14:41:13 crc kubenswrapper[4848]: E0128 14:41:13.973911 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="registry-server" Jan 28 14:41:13 crc kubenswrapper[4848]: I0128 14:41:13.973919 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="registry-server" Jan 28 14:41:13 crc kubenswrapper[4848]: E0128 14:41:13.973942 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="extract-utilities" Jan 28 14:41:13 crc kubenswrapper[4848]: I0128 14:41:13.973951 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="extract-utilities" Jan 28 14:41:13 crc kubenswrapper[4848]: I0128 14:41:13.974210 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="38258be6-c051-4b00-82e3-60bebdd600ce" containerName="registry-server" Jan 28 14:41:13 
crc kubenswrapper[4848]: I0128 14:41:13.976461 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:13 crc kubenswrapper[4848]: I0128 14:41:13.994795 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7f47z"] Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.134607 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdvt7\" (UniqueName: \"kubernetes.io/projected/5f7374f1-6b85-40c5-b702-ece8000ee302-kube-api-access-cdvt7\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.134699 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-utilities\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.134860 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-catalog-content\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.236507 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdvt7\" (UniqueName: \"kubernetes.io/projected/5f7374f1-6b85-40c5-b702-ece8000ee302-kube-api-access-cdvt7\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.236574 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-utilities\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.236707 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-catalog-content\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.237121 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-catalog-content\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.237620 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-utilities\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z" 
Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.262277 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdvt7\" (UniqueName: \"kubernetes.io/projected/5f7374f1-6b85-40c5-b702-ece8000ee302-kube-api-access-cdvt7\") pod \"certified-operators-7f47z\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") " pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.315152 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:14 crc kubenswrapper[4848]: I0128 14:41:14.903841 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7f47z"]
Jan 28 14:41:15 crc kubenswrapper[4848]: I0128 14:41:15.530125 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerStarted","Data":"07a03e2a21b9f70f87f74c83bda7792fe74a36fcd51ab86607b40a99d3eb74c5"}
Jan 28 14:41:16 crc kubenswrapper[4848]: I0128 14:41:16.540116 4848 generic.go:334] "Generic (PLEG): container finished" podID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerID="a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99" exitCode=0
Jan 28 14:41:16 crc kubenswrapper[4848]: I0128 14:41:16.540218 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerDied","Data":"a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99"}
Jan 28 14:41:16 crc kubenswrapper[4848]: I0128 14:41:16.542443 4848 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 14:41:16 crc kubenswrapper[4848]: I0128 14:41:16.849716 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:41:16 crc kubenswrapper[4848]: E0128 14:41:16.850189 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:41:18 crc kubenswrapper[4848]: I0128 14:41:18.559649 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerStarted","Data":"017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2"}
Jan 28 14:41:19 crc kubenswrapper[4848]: I0128 14:41:19.569996 4848 generic.go:334] "Generic (PLEG): container finished" podID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerID="017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2" exitCode=0
Jan 28 14:41:19 crc kubenswrapper[4848]: I0128 14:41:19.570182 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerDied","Data":"017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2"}
Jan 28 14:41:20 crc kubenswrapper[4848]: I0128 14:41:20.585211 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerStarted","Data":"8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530"}
Jan 28 14:41:20 crc kubenswrapper[4848]: I0128 14:41:20.610586 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7f47z" podStartSLOduration=4.189095397 podStartE2EDuration="7.610564027s" podCreationTimestamp="2026-01-28 14:41:13 +0000 UTC" firstStartedPulling="2026-01-28 14:41:16.542084012 +0000 UTC m=+6903.454301050" lastFinishedPulling="2026-01-28 14:41:19.963552642 +0000 UTC m=+6906.875769680" observedRunningTime="2026-01-28 14:41:20.603465294 +0000 UTC m=+6907.515682362" watchObservedRunningTime="2026-01-28 14:41:20.610564027 +0000 UTC m=+6907.522781075"
Jan 28 14:41:24 crc kubenswrapper[4848]: I0128 14:41:24.315993 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:24 crc kubenswrapper[4848]: I0128 14:41:24.316604 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:24 crc kubenswrapper[4848]: I0128 14:41:24.412901 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:30 crc kubenswrapper[4848]: I0128 14:41:30.849993 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:41:30 crc kubenswrapper[4848]: E0128 14:41:30.850878 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:41:34 crc kubenswrapper[4848]: I0128 14:41:34.387120 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:34 crc kubenswrapper[4848]: I0128 14:41:34.464000 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7f47z"]
Jan 28 14:41:34 crc kubenswrapper[4848]: I0128 14:41:34.716468 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7f47z" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="registry-server" containerID="cri-o://8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530" gracePeriod=2
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.194733 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.312375 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-utilities\") pod \"5f7374f1-6b85-40c5-b702-ece8000ee302\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") "
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.312485 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdvt7\" (UniqueName: \"kubernetes.io/projected/5f7374f1-6b85-40c5-b702-ece8000ee302-kube-api-access-cdvt7\") pod \"5f7374f1-6b85-40c5-b702-ece8000ee302\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") "
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.312657 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-catalog-content\") pod \"5f7374f1-6b85-40c5-b702-ece8000ee302\" (UID: \"5f7374f1-6b85-40c5-b702-ece8000ee302\") "
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.314040 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-utilities" (OuterVolumeSpecName: "utilities") pod "5f7374f1-6b85-40c5-b702-ece8000ee302" (UID: "5f7374f1-6b85-40c5-b702-ece8000ee302"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.326561 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f7374f1-6b85-40c5-b702-ece8000ee302-kube-api-access-cdvt7" (OuterVolumeSpecName: "kube-api-access-cdvt7") pod "5f7374f1-6b85-40c5-b702-ece8000ee302" (UID: "5f7374f1-6b85-40c5-b702-ece8000ee302"). InnerVolumeSpecName "kube-api-access-cdvt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.364853 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f7374f1-6b85-40c5-b702-ece8000ee302" (UID: "5f7374f1-6b85-40c5-b702-ece8000ee302"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.414628 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.414659 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdvt7\" (UniqueName: \"kubernetes.io/projected/5f7374f1-6b85-40c5-b702-ece8000ee302-kube-api-access-cdvt7\") on node \"crc\" DevicePath \"\""
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.414669 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f7374f1-6b85-40c5-b702-ece8000ee302-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.738728 4848 generic.go:334] "Generic (PLEG): container finished" podID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerID="8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530" exitCode=0
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.738771 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerDied","Data":"8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530"}
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.738798 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7f47z" event={"ID":"5f7374f1-6b85-40c5-b702-ece8000ee302","Type":"ContainerDied","Data":"07a03e2a21b9f70f87f74c83bda7792fe74a36fcd51ab86607b40a99d3eb74c5"}
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.738817 4848 scope.go:117] "RemoveContainer" containerID="8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.738878 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7f47z"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.767660 4848 scope.go:117] "RemoveContainer" containerID="017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.811620 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7f47z"]
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.815051 4848 scope.go:117] "RemoveContainer" containerID="a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.828241 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7f47z"]
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.890475 4848 scope.go:117] "RemoveContainer" containerID="8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530"
Jan 28 14:41:35 crc kubenswrapper[4848]: E0128 14:41:35.890965 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530\": container with ID starting with 8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530 not found: ID does not exist" containerID="8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.891035 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530"} err="failed to get container status \"8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530\": rpc error: code = NotFound desc = could not find container \"8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530\": container with ID starting with 8ac912fc51cdd73669cb93f010614babc3c79ed26039f98e735c371c0d43d530 not found: ID does not exist"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.891088 4848 scope.go:117] "RemoveContainer" containerID="017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2"
Jan 28 14:41:35 crc kubenswrapper[4848]: E0128 14:41:35.891816 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2\": container with ID starting with 017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2 not found: ID does not exist" containerID="017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.891893 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2"} err="failed to get container status \"017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2\": rpc error: code = NotFound desc = could not find container \"017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2\": container with ID starting with 017cf3634ad703f95d36f68c490760cd14688b697623f04774da3b5369296dc2 not found: ID does not exist"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.891941 4848 scope.go:117] "RemoveContainer" containerID="a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99"
Jan 28 14:41:35 crc kubenswrapper[4848]: E0128 14:41:35.892447 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99\": container with ID starting with a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99 not found: ID does not exist" containerID="a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99"
Jan 28 14:41:35 crc kubenswrapper[4848]: I0128 14:41:35.892489 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99"} err="failed to get container status \"a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99\": rpc error: code = NotFound desc = could not find container \"a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99\": container with ID starting with a3621e8e4fbf2664229518d6fffcebf343eefc8ec83f9fc4d5e3e8b667a7ab99 not found: ID does not exist"
Jan 28 14:41:36 crc kubenswrapper[4848]: I0128 14:41:36.866095 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" path="/var/lib/kubelet/pods/5f7374f1-6b85-40c5-b702-ece8000ee302/volumes"
Jan 28 14:41:43 crc kubenswrapper[4848]: I0128 14:41:43.850735 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:41:43 crc kubenswrapper[4848]: E0128 14:41:43.851474 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:41:58 crc kubenswrapper[4848]: I0128 14:41:58.854751 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:41:58 crc kubenswrapper[4848]: E0128 14:41:58.856072 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:42:13 crc kubenswrapper[4848]: I0128 14:42:13.850576 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:42:13 crc kubenswrapper[4848]: E0128 14:42:13.852129 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:42:25 crc kubenswrapper[4848]: I0128 14:42:25.849812 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:42:25 crc kubenswrapper[4848]: E0128 14:42:25.850689 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:42:36 crc kubenswrapper[4848]: I0128 14:42:36.853234 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:42:36 crc kubenswrapper[4848]: E0128 14:42:36.856929 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:42:44 crc kubenswrapper[4848]: I0128 14:42:44.579347 4848 generic.go:334] "Generic (PLEG): container finished" podID="b9fee44b-257e-4daa-9611-3216ef6be666" containerID="1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388" exitCode=0
Jan 28 14:42:44 crc kubenswrapper[4848]: I0128 14:42:44.579407 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ffllp/must-gather-zvt4j" event={"ID":"b9fee44b-257e-4daa-9611-3216ef6be666","Type":"ContainerDied","Data":"1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"}
Jan 28 14:42:44 crc kubenswrapper[4848]: I0128 14:42:44.585800 4848 scope.go:117] "RemoveContainer" containerID="1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"
Jan 28 14:42:45 crc kubenswrapper[4848]: I0128 14:42:45.492076 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ffllp_must-gather-zvt4j_b9fee44b-257e-4daa-9611-3216ef6be666/gather/0.log"
Jan 28 14:42:48 crc kubenswrapper[4848]: I0128 14:42:48.851319 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef"
Jan 28 14:42:48 crc kubenswrapper[4848]: E0128 14:42:48.852787 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.904464 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wm44g"]
Jan 28 14:42:52 crc kubenswrapper[4848]: E0128 14:42:52.905499 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="extract-utilities"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.905516 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="extract-utilities"
Jan 28 14:42:52 crc kubenswrapper[4848]: E0128 14:42:52.905551 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="extract-content"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.905562 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="extract-content"
Jan 28 14:42:52 crc kubenswrapper[4848]: E0128 14:42:52.905588 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="registry-server"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.905596 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="registry-server"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.905842 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f7374f1-6b85-40c5-b702-ece8000ee302" containerName="registry-server"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.908164 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.934597 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wm44g"]
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.951091 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwsjf\" (UniqueName: \"kubernetes.io/projected/2cc1bd60-2e23-469d-8986-16f8444e9277-kube-api-access-wwsjf\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.951181 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-utilities\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:52 crc kubenswrapper[4848]: I0128 14:42:52.951402 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-catalog-content\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.053401 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwsjf\" (UniqueName: \"kubernetes.io/projected/2cc1bd60-2e23-469d-8986-16f8444e9277-kube-api-access-wwsjf\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.053465 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-utilities\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.053557 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-catalog-content\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.054126 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-catalog-content\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.054153 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-utilities\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.079418 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwsjf\" (UniqueName: \"kubernetes.io/projected/2cc1bd60-2e23-469d-8986-16f8444e9277-kube-api-access-wwsjf\") pod \"community-operators-wm44g\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.242748 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wm44g"
Jan 28 14:42:53 crc kubenswrapper[4848]: I0128 14:42:53.830798 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wm44g"]
Jan 28 14:42:54 crc kubenswrapper[4848]: I0128 14:42:54.729060 4848 generic.go:334] "Generic (PLEG): container finished" podID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerID="8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa" exitCode=0
Jan 28 14:42:54 crc kubenswrapper[4848]: I0128 14:42:54.729165 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerDied","Data":"8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa"}
Jan 28 14:42:54 crc kubenswrapper[4848]: I0128 14:42:54.729570 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerStarted","Data":"9f7324cf268cc2573ffd973feb5ed41694e86be5a5d8b48e3068ce8d6d71e05d"}
Jan 28 14:42:55 crc kubenswrapper[4848]: I0128 14:42:55.746841 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerStarted","Data":"795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1"}
Jan 28 14:42:56 crc kubenswrapper[4848]: I0128 14:42:56.757602 4848 generic.go:334] "Generic (PLEG): container finished" podID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerID="795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1" exitCode=0
Jan 28 14:42:56 crc kubenswrapper[4848]: I0128 14:42:56.757664 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerDied","Data":"795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1"}
Jan 28 14:42:56 crc kubenswrapper[4848]: I0128 14:42:56.954337 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ffllp/must-gather-zvt4j"]
Jan 28 14:42:56 crc kubenswrapper[4848]: I0128 14:42:56.954754 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-ffllp/must-gather-zvt4j" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="copy" containerID="cri-o://765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c" gracePeriod=2
Jan 28 14:42:56 crc kubenswrapper[4848]: I0128 14:42:56.969315 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ffllp/must-gather-zvt4j"]
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.458301 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ffllp_must-gather-zvt4j_b9fee44b-257e-4daa-9611-3216ef6be666/copy/0.log"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.459000 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/must-gather-zvt4j"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.558286 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b9fee44b-257e-4daa-9611-3216ef6be666-must-gather-output\") pod \"b9fee44b-257e-4daa-9611-3216ef6be666\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") "
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.558437 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdtsh\" (UniqueName: \"kubernetes.io/projected/b9fee44b-257e-4daa-9611-3216ef6be666-kube-api-access-qdtsh\") pod \"b9fee44b-257e-4daa-9611-3216ef6be666\" (UID: \"b9fee44b-257e-4daa-9611-3216ef6be666\") "
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.582765 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9fee44b-257e-4daa-9611-3216ef6be666-kube-api-access-qdtsh" (OuterVolumeSpecName: "kube-api-access-qdtsh") pod "b9fee44b-257e-4daa-9611-3216ef6be666" (UID: "b9fee44b-257e-4daa-9611-3216ef6be666"). InnerVolumeSpecName "kube-api-access-qdtsh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.660718 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdtsh\" (UniqueName: \"kubernetes.io/projected/b9fee44b-257e-4daa-9611-3216ef6be666-kube-api-access-qdtsh\") on node \"crc\" DevicePath \"\""
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.773925 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerStarted","Data":"bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a"}
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.778864 4848 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ffllp_must-gather-zvt4j_b9fee44b-257e-4daa-9611-3216ef6be666/copy/0.log"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.779190 4848 generic.go:334] "Generic (PLEG): container finished" podID="b9fee44b-257e-4daa-9611-3216ef6be666" containerID="765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c" exitCode=143
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.779256 4848 scope.go:117] "RemoveContainer" containerID="765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.779304 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ffllp/must-gather-zvt4j"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.793840 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9fee44b-257e-4daa-9611-3216ef6be666-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b9fee44b-257e-4daa-9611-3216ef6be666" (UID: "b9fee44b-257e-4daa-9611-3216ef6be666"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.802495 4848 scope.go:117] "RemoveContainer" containerID="1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.823780 4848 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wm44g" podStartSLOduration=3.266693851 podStartE2EDuration="5.823747823s" podCreationTimestamp="2026-01-28 14:42:52 +0000 UTC" firstStartedPulling="2026-01-28 14:42:54.7318994 +0000 UTC m=+7001.644116458" lastFinishedPulling="2026-01-28 14:42:57.288953392 +0000 UTC m=+7004.201170430" observedRunningTime="2026-01-28 14:42:57.805922806 +0000 UTC m=+7004.718139874" watchObservedRunningTime="2026-01-28 14:42:57.823747823 +0000 UTC m=+7004.735964881"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.865888 4848 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b9fee44b-257e-4daa-9611-3216ef6be666-must-gather-output\") on node \"crc\" DevicePath \"\""
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.915992 4848 scope.go:117] "RemoveContainer" containerID="765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c"
Jan 28 14:42:57 crc kubenswrapper[4848]: E0128 14:42:57.916489 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c\": container with ID starting with 765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c not found: ID does not exist" containerID="765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.916527 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c"} err="failed to get container status \"765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c\": rpc error: code = NotFound desc = could not find container \"765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c\": container with ID starting with 765c980ec81cc6f5c6fb7678a010c3914105ab04834fbf2d0dd12c30c09aa91c not found: ID does not exist"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128 14:42:57.916549 4848 scope.go:117] "RemoveContainer" containerID="1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"
Jan 28 14:42:57 crc kubenswrapper[4848]: E0128 14:42:57.916853 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388\": container with ID starting with 1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388 not found: ID does not exist" containerID="1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"
Jan 28 14:42:57 crc kubenswrapper[4848]: I0128
14:42:57.916881 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388"} err="failed to get container status \"1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388\": rpc error: code = NotFound desc = could not find container \"1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388\": container with ID starting with 1df331e6a7dbda5873dbe084e02a387caf2fc41106e6300883592ebcfb841388 not found: ID does not exist" Jan 28 14:42:58 crc kubenswrapper[4848]: I0128 14:42:58.860189 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" path="/var/lib/kubelet/pods/b9fee44b-257e-4daa-9611-3216ef6be666/volumes" Jan 28 14:43:02 crc kubenswrapper[4848]: I0128 14:43:02.851871 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:43:02 crc kubenswrapper[4848]: E0128 14:43:02.854638 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:43:03 crc kubenswrapper[4848]: I0128 14:43:03.243392 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wm44g" Jan 28 14:43:03 crc kubenswrapper[4848]: I0128 14:43:03.243863 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wm44g" Jan 28 14:43:03 crc kubenswrapper[4848]: I0128 14:43:03.335508 4848 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wm44g" Jan 28 14:43:03 crc kubenswrapper[4848]: I0128 14:43:03.893753 4848 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wm44g" Jan 28 14:43:03 crc kubenswrapper[4848]: I0128 14:43:03.988060 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wm44g"] Jan 28 14:43:05 crc kubenswrapper[4848]: I0128 14:43:05.870773 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wm44g" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="registry-server" containerID="cri-o://bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a" gracePeriod=2 Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.532785 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wm44g" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.693488 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwsjf\" (UniqueName: \"kubernetes.io/projected/2cc1bd60-2e23-469d-8986-16f8444e9277-kube-api-access-wwsjf\") pod \"2cc1bd60-2e23-469d-8986-16f8444e9277\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.693578 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-utilities\") pod \"2cc1bd60-2e23-469d-8986-16f8444e9277\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.693813 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-catalog-content\") pod \"2cc1bd60-2e23-469d-8986-16f8444e9277\" (UID: \"2cc1bd60-2e23-469d-8986-16f8444e9277\") " Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.694884 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-utilities" (OuterVolumeSpecName: "utilities") pod "2cc1bd60-2e23-469d-8986-16f8444e9277" (UID: "2cc1bd60-2e23-469d-8986-16f8444e9277"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.704704 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cc1bd60-2e23-469d-8986-16f8444e9277-kube-api-access-wwsjf" (OuterVolumeSpecName: "kube-api-access-wwsjf") pod "2cc1bd60-2e23-469d-8986-16f8444e9277" (UID: "2cc1bd60-2e23-469d-8986-16f8444e9277"). InnerVolumeSpecName "kube-api-access-wwsjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.797553 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwsjf\" (UniqueName: \"kubernetes.io/projected/2cc1bd60-2e23-469d-8986-16f8444e9277-kube-api-access-wwsjf\") on node \"crc\" DevicePath \"\"" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.797603 4848 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.883486 4848 generic.go:334] "Generic (PLEG): container finished" podID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerID="bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a" exitCode=0 Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.883527 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerDied","Data":"bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a"} Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.883543 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wm44g" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.883578 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wm44g" event={"ID":"2cc1bd60-2e23-469d-8986-16f8444e9277","Type":"ContainerDied","Data":"9f7324cf268cc2573ffd973feb5ed41694e86be5a5d8b48e3068ce8d6d71e05d"} Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.883613 4848 scope.go:117] "RemoveContainer" containerID="bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.910184 4848 scope.go:117] "RemoveContainer" containerID="795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1" Jan 28 14:43:06 crc kubenswrapper[4848]: I0128 14:43:06.932129 4848 scope.go:117] "RemoveContainer" containerID="8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.011193 4848 scope.go:117] "RemoveContainer" containerID="bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a" Jan 28 14:43:07 crc kubenswrapper[4848]: E0128 14:43:07.011769 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a\": container with ID starting with bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a not found: ID does not exist" containerID="bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.011819 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a"} err="failed to get container status \"bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a\": rpc error: code = NotFound desc = could not find container \"bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a\": container with ID starting with bb178299c180f7d6dfe3ff786bd8b085407f3ab04f30d6df0b66c8d6733e339a not found: ID does not exist" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.011855 4848 scope.go:117] "RemoveContainer" containerID="795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1" Jan 28 14:43:07 crc kubenswrapper[4848]: E0128 14:43:07.012310 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1\": container with ID starting with 795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1 not found: ID does not exist" containerID="795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.012370 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1"} err="failed to get container status \"795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1\": rpc error: code = NotFound desc = could not find container \"795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1\": container with ID starting with 795ecd79d386bb694b11656522adfc5e0f35be742308214f448a2fcbe1a45ea1 not found: ID does not exist" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.012395 4848 scope.go:117] "RemoveContainer" 
containerID="8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa" Jan 28 14:43:07 crc kubenswrapper[4848]: E0128 14:43:07.012654 4848 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa\": container with ID starting with 8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa not found: ID does not exist" containerID="8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.012691 4848 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa"} err="failed to get container status \"8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa\": rpc error: code = NotFound desc = could not find container \"8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa\": container with ID starting with 8408a664bc052c6932142a770ce5d44fb1dd74818272afa4946e40ed4796d0aa not found: ID does not exist" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.034865 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2cc1bd60-2e23-469d-8986-16f8444e9277" (UID: "2cc1bd60-2e23-469d-8986-16f8444e9277"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.105381 4848 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cc1bd60-2e23-469d-8986-16f8444e9277-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.228791 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wm44g"] Jan 28 14:43:07 crc kubenswrapper[4848]: I0128 14:43:07.240573 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wm44g"] Jan 28 14:43:08 crc kubenswrapper[4848]: I0128 14:43:08.875572 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" path="/var/lib/kubelet/pods/2cc1bd60-2e23-469d-8986-16f8444e9277/volumes" Jan 28 14:43:17 crc kubenswrapper[4848]: I0128 14:43:17.850455 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:43:17 crc kubenswrapper[4848]: E0128 14:43:17.851361 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:43:31 crc kubenswrapper[4848]: I0128 14:43:31.851619 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:43:31 crc kubenswrapper[4848]: E0128 14:43:31.852835 4848 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-vfhvz_openshift-machine-config-operator(30570a21-e260-4494-89cd-2643cb0ca288)\"" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" Jan 28 14:43:42 crc kubenswrapper[4848]: I0128 14:43:42.850724 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" Jan 28 14:43:43 crc kubenswrapper[4848]: I0128 14:43:43.303626 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"1fa87925a5fa6630e078274d925ab4b3b2d445e183a7c1c9af12835cff85c9df"} Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.158418 4848 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb"] Jan 28 14:45:00 crc kubenswrapper[4848]: E0128 14:45:00.159450 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="gather" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159465 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="gather" Jan 28 14:45:00 crc kubenswrapper[4848]: E0128 14:45:00.159481 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="extract-content" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159486 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="extract-content" Jan 28 14:45:00 crc kubenswrapper[4848]: E0128 14:45:00.159513 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="copy" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159520 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="copy" Jan 28 14:45:00 crc kubenswrapper[4848]: E0128 14:45:00.159541 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="extract-utilities" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159547 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="extract-utilities" Jan 28 14:45:00 crc kubenswrapper[4848]: E0128 14:45:00.159556 4848 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="registry-server" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159563 4848 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="registry-server" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159745 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cc1bd60-2e23-469d-8986-16f8444e9277" containerName="registry-server" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159767 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="gather" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.159775 4848 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9fee44b-257e-4daa-9611-3216ef6be666" containerName="copy" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 
14:45:00.160524 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.162472 4848 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.162690 4848 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.189211 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb"] Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.309596 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w26bh\" (UniqueName: \"kubernetes.io/projected/a373e3a0-7324-42c5-ac6e-1da851aabd22-kube-api-access-w26bh\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.309716 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a373e3a0-7324-42c5-ac6e-1da851aabd22-config-volume\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.309831 4848 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a373e3a0-7324-42c5-ac6e-1da851aabd22-secret-volume\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.416637 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w26bh\" (UniqueName: \"kubernetes.io/projected/a373e3a0-7324-42c5-ac6e-1da851aabd22-kube-api-access-w26bh\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.416721 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a373e3a0-7324-42c5-ac6e-1da851aabd22-config-volume\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.416828 4848 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a373e3a0-7324-42c5-ac6e-1da851aabd22-secret-volume\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.418047 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/a373e3a0-7324-42c5-ac6e-1da851aabd22-config-volume\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.432050 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a373e3a0-7324-42c5-ac6e-1da851aabd22-secret-volume\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.439043 4848 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w26bh\" (UniqueName: \"kubernetes.io/projected/a373e3a0-7324-42c5-ac6e-1da851aabd22-kube-api-access-w26bh\") pod \"collect-profiles-29493525-8p7gb\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.499635 4848 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:00 crc kubenswrapper[4848]: I0128 14:45:00.984910 4848 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb"] Jan 28 14:45:01 crc kubenswrapper[4848]: I0128 14:45:01.138942 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" event={"ID":"a373e3a0-7324-42c5-ac6e-1da851aabd22","Type":"ContainerStarted","Data":"397f3bca41f3b1068a59951f1ad216b88f207cb6aa79e92054a5c9fd0cb20b96"} Jan 28 14:45:02 crc kubenswrapper[4848]: I0128 14:45:02.151643 4848 generic.go:334] "Generic (PLEG): container finished" podID="a373e3a0-7324-42c5-ac6e-1da851aabd22" containerID="5006feca3cef145be911ce2ebe9132697d079eed936cebea7f954607bf1b2832" exitCode=0 Jan 28 14:45:02 crc kubenswrapper[4848]: I0128 14:45:02.151757 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" event={"ID":"a373e3a0-7324-42c5-ac6e-1da851aabd22","Type":"ContainerDied","Data":"5006feca3cef145be911ce2ebe9132697d079eed936cebea7f954607bf1b2832"} Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.608276 4848 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.717414 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w26bh\" (UniqueName: \"kubernetes.io/projected/a373e3a0-7324-42c5-ac6e-1da851aabd22-kube-api-access-w26bh\") pod \"a373e3a0-7324-42c5-ac6e-1da851aabd22\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.717503 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a373e3a0-7324-42c5-ac6e-1da851aabd22-secret-volume\") pod \"a373e3a0-7324-42c5-ac6e-1da851aabd22\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.717614 4848 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a373e3a0-7324-42c5-ac6e-1da851aabd22-config-volume\") pod \"a373e3a0-7324-42c5-ac6e-1da851aabd22\" (UID: \"a373e3a0-7324-42c5-ac6e-1da851aabd22\") " Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.718635 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a373e3a0-7324-42c5-ac6e-1da851aabd22-config-volume" (OuterVolumeSpecName: "config-volume") pod "a373e3a0-7324-42c5-ac6e-1da851aabd22" (UID: "a373e3a0-7324-42c5-ac6e-1da851aabd22"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.718895 4848 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a373e3a0-7324-42c5-ac6e-1da851aabd22-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.726296 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a373e3a0-7324-42c5-ac6e-1da851aabd22-kube-api-access-w26bh" (OuterVolumeSpecName: "kube-api-access-w26bh") pod "a373e3a0-7324-42c5-ac6e-1da851aabd22" (UID: "a373e3a0-7324-42c5-ac6e-1da851aabd22"). InnerVolumeSpecName "kube-api-access-w26bh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.728557 4848 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a373e3a0-7324-42c5-ac6e-1da851aabd22-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a373e3a0-7324-42c5-ac6e-1da851aabd22" (UID: "a373e3a0-7324-42c5-ac6e-1da851aabd22"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.820634 4848 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w26bh\" (UniqueName: \"kubernetes.io/projected/a373e3a0-7324-42c5-ac6e-1da851aabd22-kube-api-access-w26bh\") on node \"crc\" DevicePath \"\"" Jan 28 14:45:03 crc kubenswrapper[4848]: I0128 14:45:03.821193 4848 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a373e3a0-7324-42c5-ac6e-1da851aabd22-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 14:45:04 crc kubenswrapper[4848]: I0128 14:45:04.187961 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" event={"ID":"a373e3a0-7324-42c5-ac6e-1da851aabd22","Type":"ContainerDied","Data":"397f3bca41f3b1068a59951f1ad216b88f207cb6aa79e92054a5c9fd0cb20b96"} Jan 28 14:45:04 crc kubenswrapper[4848]: I0128 14:45:04.188015 4848 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="397f3bca41f3b1068a59951f1ad216b88f207cb6aa79e92054a5c9fd0cb20b96" Jan 28 14:45:04 crc kubenswrapper[4848]: I0128 14:45:04.188045 4848 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493525-8p7gb" Jan 28 14:45:04 crc kubenswrapper[4848]: I0128 14:45:04.700812 4848 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz"] Jan 28 14:45:04 crc kubenswrapper[4848]: I0128 14:45:04.712832 4848 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493480-c4bmz"] Jan 28 14:45:04 crc kubenswrapper[4848]: I0128 14:45:04.869980 4848 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="357aa36c-134b-431b-b041-5f04284770d0" path="/var/lib/kubelet/pods/357aa36c-134b-431b-b041-5f04284770d0/volumes" Jan 28 14:45:08 crc kubenswrapper[4848]: I0128 14:45:08.822586 4848 scope.go:117] "RemoveContainer" containerID="7bb417d16cf5331595b8200c90138283c8e98d20ec52e7116aefa87dbdf9d0fe" Jan 28 14:46:07 crc kubenswrapper[4848]: I0128 14:46:07.924695 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:46:07 crc kubenswrapper[4848]: I0128 14:46:07.925459 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:46:37 crc kubenswrapper[4848]: I0128 14:46:37.925333 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:46:37 crc kubenswrapper[4848]: I0128 14:46:37.926279 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:47:07 crc kubenswrapper[4848]: I0128 14:47:07.924600 4848 patch_prober.go:28] interesting pod/machine-config-daemon-vfhvz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 14:47:07 crc kubenswrapper[4848]: I0128 14:47:07.926152 4848 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 14:47:07 crc kubenswrapper[4848]: I0128 14:47:07.926652 4848 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" Jan 28 14:47:07 crc kubenswrapper[4848]: I0128 14:47:07.927556 4848 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1fa87925a5fa6630e078274d925ab4b3b2d445e183a7c1c9af12835cff85c9df"} pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 14:47:07 crc kubenswrapper[4848]: I0128 14:47:07.927694 4848 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" podUID="30570a21-e260-4494-89cd-2643cb0ca288" containerName="machine-config-daemon" containerID="cri-o://1fa87925a5fa6630e078274d925ab4b3b2d445e183a7c1c9af12835cff85c9df" gracePeriod=600 Jan 28 14:47:08 crc kubenswrapper[4848]: I0128 14:47:08.563490 4848 generic.go:334] "Generic (PLEG): container finished" podID="30570a21-e260-4494-89cd-2643cb0ca288" containerID="1fa87925a5fa6630e078274d925ab4b3b2d445e183a7c1c9af12835cff85c9df" exitCode=0 Jan 28 14:47:08 crc kubenswrapper[4848]: I0128 14:47:08.563584 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerDied","Data":"1fa87925a5fa6630e078274d925ab4b3b2d445e183a7c1c9af12835cff85c9df"} Jan 28 14:47:08 crc kubenswrapper[4848]: I0128 14:47:08.563913 4848 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vfhvz" event={"ID":"30570a21-e260-4494-89cd-2643cb0ca288","Type":"ContainerStarted","Data":"92c67459cb57452e8aa17a81b89795effea47e528259b0d1d6354b2a22f045a1"} Jan 28 14:47:08 crc kubenswrapper[4848]: I0128 14:47:08.563942 4848 scope.go:117] "RemoveContainer" containerID="2906ab5838b3fdc14ea356259bd879e672ded4a63c9111411ac2e314037f3bef" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136420630024445 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136420631017363 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136402015016502 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136402015015452 5ustar corecore